deps: update V8 to 6.6.346.23

PR-URL: https://github.com/nodejs/node/pull/19201
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Myles Borins 2018-04-10 21:39:51 -04:00
parent 78cd826335
commit 12a1b9b804
1331 changed files with 64699 additions and 50530 deletions

4
deps/v8/.gitignore vendored

@@ -36,8 +36,6 @@
/_*
/build
/buildtools
/gypfiles/.gold_plugin
/gypfiles/win_toolchain.json
/hydrogen.cfg
/obj
/out
@@ -76,6 +74,8 @@
/tools/jsfunfuzz/jsfunfuzz
/tools/jsfunfuzz/jsfunfuzz.tar.gz
/tools/luci-go
/tools/mips_toolchain
/tools/mips_toolchain.tar.gz
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/swarming_client

1
deps/v8/AUTHORS vendored

@@ -136,6 +136,7 @@ Sanjoy Das <sanjoy@playingwithpointers.com>
Seo Sanghyeon <sanxiyn@gmail.com>
Stefan Penner <stefan.penner@gmail.com>
Sylvestre Ledru <sledru@mozilla.com>
Taketoshi Aono <brn@b6n.ch>
Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Tobias Burnus <burnus@net-b.de>
Victor Costan <costan@gmail.com>

102
deps/v8/BUILD.gn vendored

@@ -64,6 +64,10 @@ declare_args() {
# Enable fast mksnapshot runs.
v8_enable_fast_mksnapshot = false
# Enable embedded builtins.
# TODO(jgruber,v8:6666): Support ia32.
v8_enable_embedded_builtins = false
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
@@ -319,6 +323,9 @@ config("features") {
if (v8_check_microtasks_scopes_consistency) {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
if (v8_enable_embedded_builtins) {
defines += [ "V8_EMBEDDED_BUILTINS" ]
}
}
config("toolchain") {
@@ -387,6 +394,9 @@ config("toolchain") {
"_MIPS_ARCH_MIPS32R6",
"FPU_MODE_FP64",
]
if (mips_use_msa) {
defines += [ "_MIPS_MSA" ]
}
} else if (mips_arch_variant == "r2") {
defines += [ "_MIPS_ARCH_MIPS32R2" ]
if (mips_fpu_mode == "fp64") {
@@ -424,6 +434,9 @@ config("toolchain") {
}
if (mips_arch_variant == "r6") {
defines += [ "_MIPS_ARCH_MIPS64R6" ]
if (mips_use_msa) {
defines += [ "_MIPS_MSA" ]
}
} else if (mips_arch_variant == "r2") {
defines += [ "_MIPS_ARCH_MIPS64R2" ]
}
@@ -514,8 +527,6 @@ config("toolchain") {
if (is_clang) {
cflags += [
"-Wsign-compare",
# TODO(hans): Remove once http://crbug.com/428099 is resolved.
"-Winconsistent-missing-override",
]
@@ -883,6 +894,14 @@ action("v8_dump_build_config") {
"v8_target_cpu=\"$v8_target_cpu\"",
"v8_use_snapshot=$v8_use_snapshot",
]
if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
args += [
"mips_arch_variant=\"$mips_arch_variant\"",
"mips_use_msa=$mips_use_msa",
]
}
}
###############################################################################
@@ -1018,6 +1037,7 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-arguments-gen.cc",
"src/builtins/builtins-arguments-gen.h",
"src/builtins/builtins-array-gen.cc",
"src/builtins/builtins-array-gen.h",
"src/builtins/builtins-async-function-gen.cc",
"src/builtins/builtins-async-gen.cc",
"src/builtins/builtins-async-gen.h",
@@ -1060,8 +1080,11 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-string-gen.h",
"src/builtins/builtins-symbol-gen.cc",
"src/builtins/builtins-typedarray-gen.cc",
"src/builtins/builtins-typedarray-gen.h",
"src/builtins/builtins-utils-gen.h",
"src/builtins/builtins-wasm-gen.cc",
"src/builtins/growable-fixed-array-gen.cc",
"src/builtins/growable-fixed-array-gen.h",
"src/builtins/setup-builtins-internal.cc",
"src/heap/setup-heap-internal.cc",
"src/ic/accessor-assembler.cc",
@@ -1193,7 +1216,6 @@ v8_source_set("v8_base") {
"//base/trace_event/common/trace_event_common.h",
### gcmole(all) ###
"include/v8-debug.h",
"include/v8-inspector-protocol.h",
"include/v8-inspector.h",
"include/v8-platform.h",
@@ -1236,8 +1258,6 @@ v8_source_set("v8_base") {
"src/assert-scope.h",
"src/ast/ast-function-literal-id-reindexer.cc",
"src/ast/ast-function-literal-id-reindexer.h",
"src/ast/ast-numbering.cc",
"src/ast/ast-numbering.h",
"src/ast/ast-source-ranges.h",
"src/ast/ast-traversal-visitor.h",
"src/ast/ast-value-factory.cc",
@@ -1304,6 +1324,8 @@ v8_source_set("v8_base") {
"src/builtins/builtins-utils.h",
"src/builtins/builtins.cc",
"src/builtins/builtins.h",
"src/builtins/constants-table-builder.cc",
"src/builtins/constants-table-builder.h",
"src/cached-powers.cc",
"src/cached-powers.h",
"src/callable.h",
@@ -1396,6 +1418,7 @@ v8_source_set("v8_base") {
"src/compiler/frame-states.h",
"src/compiler/frame.cc",
"src/compiler/frame.h",
"src/compiler/functional-list.h",
"src/compiler/gap-resolver.cc",
"src/compiler/gap-resolver.h",
"src/compiler/graph-assembler.cc",
@@ -1639,6 +1662,8 @@ v8_source_set("v8_base") {
"src/global-handles.cc",
"src/global-handles.h",
"src/globals.h",
"src/handler-table.cc",
"src/handler-table.h",
"src/handles-inl.h",
"src/handles.cc",
"src/handles.h",
@@ -1670,6 +1695,7 @@ v8_source_set("v8_base") {
"src/heap/invalidated-slots-inl.h",
"src/heap/invalidated-slots.cc",
"src/heap/invalidated-slots.h",
"src/heap/item-parallel-job.cc",
"src/heap/item-parallel-job.h",
"src/heap/local-allocator.h",
"src/heap/mark-compact-inl.h",
@@ -1719,6 +1745,8 @@ v8_source_set("v8_base") {
"src/icu_util.h",
"src/identity-map.cc",
"src/identity-map.h",
"src/instruction-stream.cc",
"src/instruction-stream.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
"src/interpreter/block-coverage-builder.h",
@@ -1835,6 +1863,8 @@ v8_source_set("v8_base") {
"src/objects/js-array.h",
"src/objects/js-collection-inl.h",
"src/objects/js-collection.h",
"src/objects/js-promise-inl.h",
"src/objects/js-promise.h",
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp.h",
"src/objects/literal-objects-inl.h",
@@ -1842,6 +1872,8 @@ v8_source_set("v8_base") {
"src/objects/literal-objects.h",
"src/objects/map-inl.h",
"src/objects/map.h",
"src/objects/microtask-inl.h",
"src/objects/microtask.h",
"src/objects/module-inl.h",
"src/objects/module.cc",
"src/objects/module.h",
@@ -1849,6 +1881,8 @@ v8_source_set("v8_base") {
"src/objects/name.h",
"src/objects/object-macros-undef.h",
"src/objects/object-macros.h",
"src/objects/promise-inl.h",
"src/objects/promise.h",
"src/objects/property-descriptor-object-inl.h",
"src/objects/property-descriptor-object.h",
"src/objects/regexp-match-info.h",
@@ -1865,8 +1899,6 @@ v8_source_set("v8_base") {
"src/objects/template-objects.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parsing/background-parsing-task.cc",
"src/parsing/background-parsing-task.h",
"src/parsing/duplicate-finder.h",
"src/parsing/expression-classifier.h",
"src/parsing/expression-scope-reparenter.cc",
@@ -2126,8 +2158,6 @@ v8_source_set("v8_base") {
"src/wasm/signature-map.h",
"src/wasm/streaming-decoder.cc",
"src/wasm/streaming-decoder.h",
"src/wasm/wasm-api.cc",
"src/wasm/wasm-api.h",
"src/wasm/wasm-code-manager.cc",
"src/wasm/wasm-code-manager.h",
"src/wasm/wasm-code-specialization.cc",
@@ -2570,11 +2600,15 @@ v8_component("v8_libbase") {
if (is_posix) {
sources += [
"src/base/platform/platform-posix-time.cc",
"src/base/platform/platform-posix-time.h",
"src/base/platform/platform-posix.cc",
"src/base/platform/platform-posix.h",
]
if (current_os != "aix") {
sources += [
"src/base/platform/platform-posix-time.cc",
"src/base/platform/platform-posix-time.h",
]
}
}
if (is_linux) {
@@ -2824,7 +2858,7 @@ group("v8_clusterfuzz") {
if (v8_test_isolation_mode != "noop") {
deps += [
"tools:run-deopt-fuzzer_run",
"test:d8_default_run",
"tools:run-num-fuzzer_run",
]
}
@@ -2842,9 +2876,9 @@ group("v8_fuzzers") {
":v8_simple_json_fuzzer",
":v8_simple_multi_return_fuzzer",
":v8_simple_parser_fuzzer",
":v8_simple_regexp_builtins_fuzzer",
":v8_simple_regexp_fuzzer",
":v8_simple_wasm_async_fuzzer",
":v8_simple_wasm_call_fuzzer",
":v8_simple_wasm_code_fuzzer",
":v8_simple_wasm_compile_fuzzer",
":v8_simple_wasm_data_section_fuzzer",
@@ -2952,7 +2986,7 @@ v8_executable("d8") {
}
if (v8_correctness_fuzzer) {
deps += [ "tools/foozzie:v8_correctness_fuzzer_resources" ]
deps += [ "tools/clusterfuzz:v8_correctness_fuzzer_resources" ]
}
defines = []
@@ -3127,6 +3161,25 @@ v8_source_set("parser_fuzzer") {
v8_fuzzer("parser_fuzzer") {
}
v8_source_set("regexp_builtins_fuzzer") {
sources = [
"test/fuzzer/regexp-builtins.cc",
"test/fuzzer/regexp_builtins/mjsunit.js.h",
]
deps = [
":fuzzer_support",
]
configs = [
":external_config",
":internal_config_base",
]
}
v8_fuzzer("regexp_builtins_fuzzer") {
}
v8_source_set("regexp_fuzzer") {
sources = [
"test/fuzzer/regexp.cc",
@@ -3218,27 +3271,6 @@ v8_source_set("wasm_code_fuzzer") {
v8_fuzzer("wasm_code_fuzzer") {
}
v8_source_set("wasm_call_fuzzer") {
sources = [
"test/common/wasm/test-signatures.h",
"test/fuzzer/wasm-call.cc",
]
deps = [
":fuzzer_support",
":lib_wasm_fuzzer_common",
":wasm_module_runner",
]
configs = [
":external_config",
":internal_config_base",
]
}
v8_fuzzer("wasm_call_fuzzer") {
}
v8_source_set("lib_wasm_fuzzer_common") {
sources = [
"test/fuzzer/wasm-fuzzer-common.cc",

1745
deps/v8/ChangeLog vendored

File diff suppressed because it is too large

126
deps/v8/DEPS vendored

@@ -5,20 +5,22 @@
vars = {
'checkout_instrumented_libraries': False,
'chromium_url': 'https://chromium.googlesource.com',
'build_for_node': False,
'download_gcmole': False,
'download_jsfunfuzz': False,
'download_mips_toolchain': False,
}
deps = {
'v8/build':
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b3a78cd03a95c30ff10f863f736249eb04f0f34d',
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b1d6c28b4a64128ad856d9da458afda2861fddab',
'v8/tools/gyp':
Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
'v8/third_party/icu':
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'c8ca2962b46670ec89071ffd1291688983cd319c',
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'd888fd2a1be890f4d35e43f68d6d79f42519a357',
'v8/third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b7578b4132cf73ca3265e2ee0b7bd0a422a54ebf',
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b745ddca2c63719167c0f2008ae19e667c5e9952',
'v8/buildtools':
Var('chromium_url') + '/chromium/buildtools.git' + '@' + '6fe4a3251488f7af86d64fc25cf442e817cf6133',
Var('chromium_url') + '/chromium/buildtools.git' + '@' + '2888931260f2a32bc583f005bd807a561b2fa6af',
'v8/base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '0e9a47d74970bee1bbfc063c47215406f8918699',
'v8/third_party/android_ndk': {
@@ -26,11 +28,11 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/android_tools': {
'url': Var('chromium_url') + '/android_tools.git' + '@' + 'c78b25872734e0038ae2a333edc645cd96bc232d',
'url': Var('chromium_url') + '/android_tools.git' + '@' + '9a70d48fcdd68cd0e7e968f342bd767ee6323bd1',
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
'url': Var('chromium_url') + '/catapult.git' + '@' + 'b4826a52853c9c2778d496f6c6fa853f777f94df',
'url': Var('chromium_url') + '/catapult.git' + '@' + '8a42ad3cb185e340c32b20f657980fd057e3769f',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@@ -52,15 +54,15 @@ deps = {
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '8311965251953d4745aeb68c98fb71fab2eac1d0',
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'b59d956b3c268abd0875aeb87d6688f4c7aafc9b',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
'v8/tools/clang':
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '27088876ff821e8a1518383576a43662a3255d56',
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b3d3f5920b161f95f1a8ffe08b75c695e0edf350',
'v8/tools/luci-go':
Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + 'd882048313f6f51df29856406fa03b620c1d0205',
Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + 'ff0709d4283b1f233dcf0c9fec1672c6ecaed2f1',
'v8/test/wasm-js':
Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'a25083ac7076b05e3f304ec9e093ef1b1ee09422',
Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '4653fc002a510b4f207af07f2c7c61b13dba78d9',
}
recursedeps = [
@@ -78,7 +80,6 @@ include_rules = [
# checkdeps.py shouldn't check for includes in these directories:
skip_child_includes = [
'build',
'gypfiles',
'third_party',
]
@@ -91,14 +92,16 @@ hooks = [
'pattern': '.',
'action': [
'python',
'v8/gypfiles/landmines.py',
'v8/build/landmines.py',
'--landmine-scripts',
'v8/tools/get_landmines.py',
],
},
# Pull clang-format binaries using checked-in hashes.
{
'name': 'clang_format_win',
'pattern': '.',
'condition': 'host_os == "win" and build_for_node != True',
'condition': 'host_os == "win"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=win32',
@@ -110,7 +113,7 @@ hooks = [
{
'name': 'clang_format_mac',
'pattern': '.',
'condition': 'host_os == "mac" and build_for_node != True',
'condition': 'host_os == "mac"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=darwin',
@@ -122,7 +125,7 @@ hooks = [
{
'name': 'clang_format_linux',
'pattern': '.',
'condition': 'host_os == "linux" and build_for_node != True',
'condition': 'host_os == "linux"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=linux*',
@@ -134,28 +137,30 @@ hooks = [
{
'name': 'gcmole',
'pattern': '.',
'condition': 'build_for_node != True',
# TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
'action': [
'python',
'v8/tools/gcmole/download_gcmole_tools.py',
'condition': 'download_gcmole',
'action': [ 'download_from_google_storage',
'--bucket', 'chrome-v8-gcmole',
'-u', '--no_resume',
'-s', 'v8/tools/gcmole/gcmole-tools.tar.gz.sha1',
'--platform=linux*',
],
},
{
'name': 'jsfunfuzz',
'pattern': '.',
'condition': 'build_for_node != True',
# TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
'action': [
'python',
'v8/tools/jsfunfuzz/download_jsfunfuzz.py',
'condition': 'download_jsfunfuzz',
'action': [ 'download_from_google_storage',
'--bucket', 'chrome-v8-jsfunfuzz',
'-u', '--no_resume',
'-s', 'v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1',
'--platform=linux*',
],
},
# Pull luci-go binaries (isolate, swarming) using checked-in hashes.
{
'name': 'luci-go_win',
'pattern': '.',
'condition': 'host_os == "win" and build_for_node != True',
'condition': 'host_os == "win"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=win32',
@@ -167,7 +172,7 @@ hooks = [
{
'name': 'luci-go_mac',
'pattern': '.',
'condition': 'host_os == "mac" and build_for_node != True',
'condition': 'host_os == "mac"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=darwin',
@@ -179,7 +184,7 @@ hooks = [
{
'name': 'luci-go_linux',
'pattern': '.',
'condition': 'host_os == "linux" and build_for_node != True',
'condition': 'host_os == "linux"',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=linux*',
@@ -228,7 +233,6 @@ hooks = [
{
'name': 'wasm_spec_tests',
'pattern': '.',
'condition': 'build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
@@ -240,7 +244,6 @@ hooks = [
{
'name': 'closure_compiler',
'pattern': '.',
'condition': 'build_for_node != True',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
@@ -250,17 +253,39 @@ hooks = [
],
},
{
# Downloads the current stable linux sysroot to build/linux/ if needed.
# This sysroot updates at about the same rate that the chrome build deps
# change.
'name': 'sysroot',
'name': 'sysroot_arm',
'pattern': '.',
'condition': 'build_for_node != True',
'action': [
'python',
'v8/build/linux/sysroot_scripts/install-sysroot.py',
'--running-as-hook',
],
'condition': 'checkout_linux and checkout_arm',
'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=arm'],
},
{
'name': 'sysroot_arm64',
'pattern': '.',
'condition': 'checkout_linux and checkout_arm64',
'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=arm64'],
},
{
'name': 'sysroot_x86',
'pattern': '.',
'condition': 'checkout_linux and (checkout_x86 or checkout_x64)',
'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=x86'],
},
{
'name': 'sysroot_mips',
'pattern': '.',
'condition': 'checkout_linux and checkout_mips',
'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=mips'],
},
{
'name': 'sysroot_x64',
'pattern': '.',
'condition': 'checkout_linux and checkout_x64',
'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py',
'--arch=x64'],
},
{
'name': 'msan_chained_origins',
@@ -297,7 +322,7 @@ hooks = [
{
'name': 'binutils',
'pattern': 'v8/third_party/binutils',
'condition': 'host_os == "linux" and build_for_node != True',
'condition': 'host_os == "linux"',
'action': [
'python',
'v8/third_party/binutils/download.py',
@@ -307,6 +332,8 @@ hooks = [
# Note: On Win, this should run after win_toolchain, as it may use it.
'name': 'clang',
'pattern': '.',
# clang not supported on aix
'condition': 'host_os != "aix"',
'action': ['python', 'v8/tools/clang/scripts/update.py'],
},
{
@@ -316,15 +343,20 @@ hooks = [
'action': [
'python',
'v8/build/fuchsia/update_sdk.py',
'226f6dd0cad1d6be63a353ce2649423470729ae9',
],
},
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
'name': 'regyp_if_needed',
'name': 'mips_toolchain',
'pattern': '.',
'condition': 'build_for_node != True',
'action': ['python', 'v8/gypfiles/gyp_v8', '--running-as-hook'],
'condition': 'download_mips_toolchain',
'action': [ 'download_from_google_storage',
'--no_resume',
'--platform=linux',
'--no_auth',
'-u',
'--bucket', 'chromium-v8',
'-s', 'v8/tools/mips_toolchain.tar.gz.sha1',
],
},
# Download and initialize "vpython" VirtualEnv environment packages.
{

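The per-arch sysroot hooks added to DEPS above each carry a 'condition' string evaluated against the checkout variables. A minimal Python sketch of that gating follows; the condition strings are copied from the diff, but evaluating them with eval() over a sample variable dict is an illustrative assumption, not gclient's actual implementation.

# Hedged sketch: which of the sysroot hooks above would fire for a given
# checkout. Condition strings come from the DEPS hunk; eval()-based
# evaluation and the sample variable values are assumptions.
conditions = {
    'sysroot_arm':   'checkout_linux and checkout_arm',
    'sysroot_arm64': 'checkout_linux and checkout_arm64',
    'sysroot_x86':   'checkout_linux and (checkout_x86 or checkout_x64)',
    'sysroot_mips':  'checkout_linux and checkout_mips',
    'sysroot_x64':   'checkout_linux and checkout_x64',
}
variables = {'checkout_linux': True, 'checkout_arm': False,
             'checkout_arm64': False, 'checkout_x86': False,
             'checkout_x64': True, 'checkout_mips': False}
for name, cond in conditions.items():
    if eval(cond, {'__builtins__': {}}, variables):
        print('would run hook:', name)
# Prints: would run hook: sysroot_x86, then: would run hook: sysroot_x64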
493
deps/v8/Makefile vendored

@@ -1,493 +0,0 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Variable default definitions. Override them by exporting them in your shell.
OUTDIR ?= out
TESTJOBS ?=
GYPFLAGS ?=
TESTFLAGS ?=
ANDROID_NDK_HOST_ARCH ?=
ANDROID_V8 ?= /data/local/tmp/v8
# Special build flags. Use them like this: "make library=shared"
# library=shared || component=shared_library
ifeq ($(library), shared)
GYPFLAGS += -Dcomponent=shared_library
endif
ifdef component
GYPFLAGS += -Dcomponent=$(component)
endif
# disassembler=on
ifeq ($(disassembler), on)
GYPFLAGS += -Dv8_enable_disassembler=1
endif
# objectprint=on
ifeq ($(objectprint), on)
GYPFLAGS += -Dv8_object_print=1
endif
# verifycsa=on
ifeq ($(verifycsa), on)
GYPFLAGS += -Dv8_enable_verify_csa=1
endif
# verifyheap=on
ifeq ($(verifyheap), on)
GYPFLAGS += -Dv8_enable_verify_heap=1
endif
# tracemaps=on
ifeq ($(tracemaps), on)
GYPFLAGS += -Dv8_trace_maps=1
endif
# concurrentmarking=on
ifeq ($(concurrentmarking), on)
GYPFLAGS += -Dv8_enable_concurrent_marking=1
endif
# backtrace=off
ifeq ($(backtrace), off)
GYPFLAGS += -Dv8_enable_backtrace=0
else
GYPFLAGS += -Dv8_enable_backtrace=1
endif
# verifypredictable=on
ifeq ($(verifypredictable), on)
GYPFLAGS += -Dv8_enable_verify_predictable=1
endif
# snapshot=off
ifeq ($(snapshot), off)
GYPFLAGS += -Dv8_use_snapshot='false'
endif
ifeq ($(snapshot), external)
GYPFLAGS += -Dv8_use_external_startup_data=1
endif
# extrachecks=on/off
ifeq ($(extrachecks), on)
GYPFLAGS += -Ddcheck_always_on=1 -Dv8_enable_handle_zapping=1
endif
ifeq ($(extrachecks), off)
GYPFLAGS += -Ddcheck_always_on=0 -Dv8_enable_handle_zapping=0
endif
# slowdchecks=on/off
ifeq ($(slowdchecks), on)
GYPFLAGS += -Dv8_enable_slow_dchecks=1
endif
ifeq ($(slowdchecks), off)
GYPFLAGS += -Dv8_enable_slow_dchecks=0
endif
# debugsymbols=on
ifeq ($(debugsymbols), on)
GYPFLAGS += -Drelease_extra_cflags=-ggdb3
endif
# gdbjit=on/off
ifeq ($(gdbjit), on)
GYPFLAGS += -Dv8_enable_gdbjit=1
endif
ifeq ($(gdbjit), off)
GYPFLAGS += -Dv8_enable_gdbjit=0
endif
# vtunejit=on
ifeq ($(vtunejit), on)
GYPFLAGS += -Dv8_enable_vtunejit=1
endif
# unalignedaccess=on
ifeq ($(unalignedaccess), on)
GYPFLAGS += -Dv8_can_use_unaligned_accesses=true
endif
# randomseed=12345, disable random seed via randomseed=0
ifdef randomseed
GYPFLAGS += -Dv8_random_seed=$(randomseed)
endif
# soname_version=1.2.3
ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version)
endif
# werror=no
ifeq ($(werror), no)
GYPFLAGS += -Dwerror=''
endif
# strictaliasing=off (workaround for GCC-4.5)
ifeq ($(strictaliasing), off)
GYPFLAGS += -Dv8_no_strict_aliasing=1
endif
# regexp=interpreted
ifeq ($(regexp), interpreted)
GYPFLAGS += -Dv8_interpreted_regexp=1
endif
# i18nsupport=off
ifeq ($(i18nsupport), off)
GYPFLAGS += -Dv8_enable_i18n_support=0
TESTFLAGS += --noi18n
endif
# deprecationwarnings=on
ifeq ($(deprecationwarnings), on)
GYPFLAGS += -Dv8_deprecation_warnings=1
endif
# vectorstores=on
ifeq ($(vectorstores), on)
GYPFLAGS += -Dv8_vector_stores=1
endif
# imminentdeprecationwarnings=on
ifeq ($(imminentdeprecationwarnings), on)
GYPFLAGS += -Dv8_imminent_deprecation_warnings=1
endif
# asan=on
ifeq ($(asan), on)
GYPFLAGS += -Dasan=1 -Dclang=1
TESTFLAGS += --asan
ifeq ($(lsan), on)
GYPFLAGS += -Dlsan=1
endif
endif
ifdef embedscript
GYPFLAGS += -Dembed_script=$(embedscript)
endif
ifdef warmupscript
GYPFLAGS += -Dwarmup_script=$(warmupscript)
endif
ifeq ($(goma), on)
GYPFLAGS += -Duse_goma=1
endif
# v8_os_page_size=0, when 0 or not specified use build OS page size
ifdef v8_os_page_size
ifneq ($(v8_os_page_size), 0)
ifneq ($(snapshot), off)
GYPFLAGS += -Dv8_os_page_size=$(v8_os_page_size)
endif
endif
endif
# arm specific flags.
# arm_version=<number | "default">
ifneq ($(strip $(arm_version)),)
GYPFLAGS += -Darm_version=$(arm_version)
else
# Deprecated (use arm_version instead): armv7=false/true
ifeq ($(armv7), false)
GYPFLAGS += -Darm_version=6
else
ifeq ($(armv7), true)
GYPFLAGS += -Darm_version=7
endif
endif
endif
# hardfp=on/off. Deprecated, use armfloatabi
ifeq ($(hardfp),on)
GYPFLAGS += -Darm_float_abi=hard
else
ifeq ($(hardfp),off)
GYPFLAGS += -Darm_float_abi=softfp
endif
endif
# fpu: armfpu=xxx
# xxx: vfp, vfpv3-d16, vfpv3, neon.
ifeq ($(armfpu),)
GYPFLAGS += -Darm_fpu=default
else
GYPFLAGS += -Darm_fpu=$(armfpu)
endif
# float abi: armfloatabi=softfp/hard
ifeq ($(armfloatabi),)
ifeq ($(hardfp),)
GYPFLAGS += -Darm_float_abi=default
endif
else
GYPFLAGS += -Darm_float_abi=$(armfloatabi)
endif
# armthumb=on/off
ifeq ($(armthumb), off)
GYPFLAGS += -Darm_thumb=0
else
ifeq ($(armthumb), on)
GYPFLAGS += -Darm_thumb=1
endif
endif
# arm_test_noprobe=on
# With this flag set, by default v8 will only use features implied
# by the compiler (no probe). This is done by modifying the default
# values of enable_armv7, enable_vfp3, enable_32dregs and enable_neon.
# Modifying these flags when launching v8 will enable the probing for
# the specified values.
ifeq ($(arm_test_noprobe), on)
GYPFLAGS += -Darm_test_noprobe=on
endif
# Do not omit the frame pointer, needed for profiling with perf
ifeq ($(no_omit_framepointer), on)
GYPFLAGS += -Drelease_extra_cflags=-fno-omit-frame-pointer
endif
ifdef android_ndk_root
GYPFLAGS += -Dandroid_ndk_root=$(android_ndk_root)
export ANDROID_NDK_ROOT = $(android_ndk_root)
endif
# ----------------- available targets: --------------------
# - any arch listed in ARCHES (see below)
# - any mode listed in MODES
# - every combination <arch>.<mode>, e.g. "ia32.release"
# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check"
# - "android": cross-compile for Android/ARM
# - default (no target specified): build all DEFAULT_ARCHES and MODES
# - "check": build all targets and run all tests
# - "<arch>.clean" for any <arch> in ARCHES
# - "clean": clean all ARCHES
# ----------------- internal stuff ------------------------
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el ppc ppc64 s390 s390x
ARCHES32 = ia32 arm mips mipsel ppc s390
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
android_mipsel
# List of files that trigger Makefile regeneration:
GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
gypfiles/shim_headers.gypi gypfiles/features.gypi \
gypfiles/standalone.gypi \
gypfiles/toolchain.gypi gypfiles/all.gyp gypfiles/mac/asan.gyp \
test/cctest/cctest.gyp test/fuzzer/fuzzer.gyp \
test/unittests/unittests.gyp src/v8.gyp \
tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \
samples/samples.gyp src/third_party/vtune/v8vtune.gyp src/d8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
GYPFILES += src/third_party/vtune/v8vtune.gyp
endif
# Generates all combinations of ARCHES and MODES, e.g. "ia32.release".
BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
# Generates corresponding test targets, e.g. "ia32.release.check".
CHECKS = $(addsuffix .check,$(BUILDS))
QUICKCHECKS = $(addsuffix .quickcheck,$(BUILDS))
ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
.PHONY: all check clean builddeps dependencies $(ENVFILE).new native \
qc quickcheck $(QUICKCHECKS) turbocheck \
$(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS)
# Target definitions. "all" is the default.
all: $(DEFAULT_MODES)
# Special target for the buildbots to use. Depends on $(OUTDIR)/Makefile
# having been created before.
buildbot:
$(MAKE) -C "$(OUTDIR)" BUILDTYPE=$(BUILDTYPE) \
builddir="$(abspath $(OUTDIR))/$(BUILDTYPE)"
# Compile targets. MODES and ARCHES are convenience targets.
.SECONDEXPANSION:
$(MODES): $(addsuffix .$$@,$(DEFAULT_ARCHES))
$(ARCHES): $(addprefix $$@.,$(DEFAULT_MODES))
# Defines how to build a particular target (e.g. ia32.release).
$(BUILDS): $(OUTDIR)/Makefile.$$@
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print \
raw_input().replace('opt', '').capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
native: $(OUTDIR)/Makefile.native
@$(MAKE) -C "$(OUTDIR)" -f Makefile.native \
BUILDTYPE=Release \
builddir="$(shell pwd)/$(OUTDIR)/$@"
$(ANDROID_ARCHES): $(addprefix $$@.,$(MODES))
$(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) Makefile.android
@$(MAKE) -f Makefile.android $@ \
ARCH="$(basename $@)" \
MODE="$(subst .,,$(suffix $@))" \
OUTDIR="$(OUTDIR)" \
GYPFLAGS="$(GYPFLAGS)"
# Test targets.
check: all
@gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch=$(shell echo $(DEFAULT_ARCHES) | sed -e 's/ /,/g') \
$(TESTFLAGS)
$(addsuffix .check,$(MODES)): $$(basename $$@)
@gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--mode=$(basename $@) $(TESTFLAGS)
$(addsuffix .check,$(ARCHES)): $$(basename $$@)
@gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch=$(basename $@) $(TESTFLAGS)
$(CHECKS): $$(basename $$@)
@gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) $(TESTFLAGS)
$(addsuffix .quickcheck,$(MODES)): $$(basename $$@)
@gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--mode=$(basename $@) $(TESTFLAGS) --quickcheck
$(addsuffix .quickcheck,$(ARCHES)): $$(basename $$@)
@gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch=$(basename $@) $(TESTFLAGS) --quickcheck
$(QUICKCHECKS): $$(basename $$@)
@gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) $(TESTFLAGS) --quickcheck
$(addsuffix .sync, $(ANDROID_BUILDS)): $$(basename $$@)
@tools/android-sync.sh $(basename $@) $(OUTDIR) \
$(shell pwd) $(ANDROID_V8)
$(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
@gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
--timeout=600 \
--command-prefix="tools/android-run.py" $(TESTFLAGS)
$(addsuffix .check, $(ANDROID_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
native.check: native
@gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
SUPERFASTTESTMODES = ia32.release
FASTTESTMODES = $(SUPERFASTTESTMODES),x64.release,ia32.optdebug,x64.optdebug,arm.optdebug,arm64.release
FASTCOMPILEMODES = $(FASTTESTMODES),arm64.optdebug
COMMA = ,
EMPTY =
SPACE = $(EMPTY) $(EMPTY)
quickcheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(SUPERFASTTESTMODES) $(TESTFLAGS) --quickcheck \
--download-data mozilla webkit
gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) --quickcheck
qc: quickcheck
turbocheck: $(subst $(COMMA),$(SPACE),$(FASTCOMPILEMODES))
gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(SUPERFASTTESTMODES) $(TESTFLAGS) \
--quickcheck --variants=turbofan --download-data mozilla webkit
gypfiles/run-tests-legacy.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(FASTTESTMODES) $(TESTFLAGS) \
--quickcheck --variants=turbofan
tc: turbocheck
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)):
rm -f $(OUTDIR)/Makefile.$(basename $@)*
rm -rf $(OUTDIR)/$(basename $@).release
rm -rf $(OUTDIR)/$(basename $@).debug
rm -rf $(OUTDIR)/$(basename $@).optdebug
find $(OUTDIR) -regex '.*\(host\|target\)\.$(basename $@).*\.mk' -delete
native.clean:
rm -f $(OUTDIR)/Makefile.native
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\)\.native\.mk' -delete
clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean gtags.clean tags.clean
# GYP file generation targets.
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS))
$(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
$(eval CXX_TARGET_ARCH:=$(shell $(CXX) -v 2>&1 | grep ^Target: | \
cut -f 2 -d " " | cut -f 1 -d "-" ))
$(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst x86_64,x64,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst s390x,s390,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst powerpc,ppc,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst ppc64,ppc,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst ppcle,ppc,$(CXX_TARGET_ARCH)))
$(eval V8_TARGET_ARCH:=$(subst .,,$(suffix $(basename $@))))
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH):$(shell pwd)/tools/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
tools/gyp/gyp --generator-output="$(OUTDIR)" gypfiles/all.gyp \
-Igypfiles/standalone.gypi --depth=. \
-Dv8_target_arch=$(V8_TARGET_ARCH) \
$(if $(findstring $(CXX_TARGET_ARCH),$(V8_TARGET_ARCH)), \
-Dtarget_arch=$(V8_TARGET_ARCH), \
$(if $(shell echo $(ARCHES32) | grep $(V8_TARGET_ARCH)), \
-Dtarget_arch=ia32,)) \
$(if $(findstring optdebug,$@),-Dv8_optimized_debug=1,) \
-S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
$(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH):$(shell pwd)/tools/gyp/pylib:$(PYTHONPATH)" \
GYP_GENERATORS=make \
tools/gyp/gyp --generator-output="$(OUTDIR)" gypfiles/all.gyp \
-Igypfiles/standalone.gypi --depth=. -S.native $(GYPFLAGS)
# Replaces the old with the new environment file if they're different, which
# will trigger GYP to regenerate Makefiles.
$(ENVFILE): $(ENVFILE).new
@if test -r $(ENVFILE) && cmp $(ENVFILE).new $(ENVFILE) > /dev/null; \
then rm $(ENVFILE).new; \
else mv $(ENVFILE).new $(ENVFILE); fi
# Stores current GYPFLAGS in a file.
$(ENVFILE).new:
$(eval CXX_TARGET_ARCH:=$(shell $(CXX) -v 2>&1 | grep ^Target: | \
cut -f 2 -d " " | cut -f 1 -d "-" ))
$(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH)))
$(eval CXX_TARGET_ARCH:=$(subst x86_64,x64,$(CXX_TARGET_ARCH)))
@mkdir -p $(OUTDIR); echo "GYPFLAGS=$(GYPFLAGS) -Dtarget_arch=$(CXX_TARGET_ARCH)" > $(ENVFILE).new;
# Support for the GNU GLOBAL Source Code Tag System.
gtags.files: $(GYPFILES) $(ENVFILE)
@find include src test -name '*.h' -o -name '*.cc' -o -name '*.c' > $@
# We need to manually set the stack limit here, to work around bugs in
# gmake-3.81 and global-5.7.1 on recent 64-bit Linux systems.
# Using $(wildcard ...) gracefully ignores non-existing files, so that stale
# gtags.files after switching branches don't cause recipe failures.
GPATH GRTAGS GSYMS GTAGS: gtags.files $(wildcard $(shell cat gtags.files 2> /dev/null))
@bash -c 'ulimit -s 10240 && GTAGSFORCECPP=yes gtags -i -q -f $<'
gtags.clean:
rm -f gtags.files GPATH GRTAGS GSYMS GTAGS
tags: gtags.files $(wildcard $(shell cat gtags.files 2> /dev/null))
@(ctags --version | grep 'Exuberant Ctags' >/dev/null) || \
(echo "Please install Exuberant Ctags (check 'ctags --version')" >&2; false)
ctags --fields=+l -L $<
tags.clean:
rm -r tags
dependencies builddeps:
$(error Use 'gclient sync' instead)
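
The deleted Makefile derived its build targets as the cross product of ARCHES and MODES (e.g. "ia32.release"), with ".check" test targets appended. A small Python sketch of the same expansion, with names taken from the Makefile above:

# Equivalent of:
#   BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
#   CHECKS = $(addsuffix .check,$(BUILDS))
ARCHES = ['ia32', 'x64', 'arm', 'arm64', 'mips', 'mipsel', 'mips64',
          'mips64el', 'ppc', 'ppc64', 's390', 's390x']
MODES = ['release', 'debug', 'optdebug']

BUILDS = ['%s.%s' % (arch, mode) for mode in MODES for arch in ARCHES]
CHECKS = [build + '.check' for build in BUILDS]

print(len(BUILDS))   # 36 build targets
print(CHECKS[0])     # ia32.release.check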

72
deps/v8/Makefile.android vendored

@@ -1,72 +0,0 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Those definitions should be consistent with the main Makefile
ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
android_mipsel android_x87
MODES = release debug
# Generates all combinations of ANDROID ARCHES and MODES,
# e.g. "android_ia32.release" or "android_arm.release"
ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
ifeq ($(ARCH), android_arm)
DEFINES = target_arch=arm v8_target_arch=arm
else ifeq ($(ARCH), android_arm64)
DEFINES = target_arch=arm64 v8_target_arch=arm64
else ifeq ($(ARCH), android_mipsel)
DEFINES = target_arch=mipsel v8_target_arch=mipsel
else ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32
else ifeq ($(ARCH), android_x64)
DEFINES = target_arch=x64 v8_target_arch=x64
else ifeq ($(ARCH), android_x87)
DEFINES = target_arch=ia32 v8_target_arch=x87
else
$(error Target architecture "${ARCH}" is not supported)
endif
# Common flags.
DEFINES += OS=android
.SECONDEXPANSION:
$(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$@ \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# Android GYP file generation targets.
ANDROID_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ANDROID_BUILDS))
$(ANDROID_MAKEFILES):
GYP_GENERATORS=make-android \
GYP_DEFINES="${DEFINES}" \
PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/gypfiles:$(PYTHONPATH)" \
tools/gyp/gyp --generator-output="${OUTDIR}" gypfiles/all.gyp \
-Igypfiles/standalone.gypi --depth=. \
-S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}

7
deps/v8/OWNERS vendored

@@ -7,6 +7,7 @@ bradnelson@chromium.org
cbruni@chromium.org
clemensh@chromium.org
danno@chromium.org
delphick@chromium.org
eholk@chromium.org
franzih@chromium.org
gdeepti@chromium.org
@@ -17,26 +18,22 @@ ishell@chromium.org
jarin@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
jochen@chromium.org
leszeks@chromium.org
littledan@chromium.org
machenbach@chromium.org
marja@chromium.org
mlippautz@chromium.org
mstarzinger@chromium.org
mtrofin@chromium.org
mvstanton@chromium.org
mythria@chromium.org
neis@chromium.org
petermarshall@chromium.org
rmcilroy@chromium.org
rossberg@chromium.org
sergiyb@chromium.org
sigurds@chromium.org
tebbi@chromium.org
titzer@chromium.org
ulan@chromium.org
verwaest@chromium.org
vogelheim@chromium.org
yangguo@chromium.org
# TEAM: v8-dev@googlegroups.com

92
deps/v8/PRESUBMIT.py vendored

@@ -153,6 +153,62 @@ def _CheckUnwantedDependencies(input_api, output_api):
return results
def _CheckHeadersHaveIncludeGuards(input_api, output_api):
"""Ensures that all header files have include guards."""
file_inclusion_pattern = r'src/.+\.h'
def FilterFile(affected_file):
black_list = (_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST)
return input_api.FilterSourceFile(
affected_file,
white_list=(file_inclusion_pattern, ),
black_list=black_list)
leading_src_pattern = input_api.re.compile(r'^src/')
dash_dot_slash_pattern = input_api.re.compile(r'[-./]')
def PathToGuardMacro(path):
"""Guards should be of the form V8_PATH_TO_FILE_WITHOUT_SRC_H_."""
x = input_api.re.sub(leading_src_pattern, 'v8_', path)
x = input_api.re.sub(dash_dot_slash_pattern, '_', x)
x = x.upper() + "_"
return x
problems = []
for f in input_api.AffectedSourceFiles(FilterFile):
local_path = f.LocalPath()
guard_macro = PathToGuardMacro(local_path)
guard_patterns = [
input_api.re.compile(r'^#ifndef ' + guard_macro + '$'),
input_api.re.compile(r'^#define ' + guard_macro + '$'),
input_api.re.compile(r'^#endif // ' + guard_macro + '$')]
skip_check_pattern = input_api.re.compile(
r'^// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD')
found_patterns = [ False, False, False ]
file_omitted = False
for line in f.NewContents():
for i in range(len(guard_patterns)):
if guard_patterns[i].match(line):
found_patterns[i] = True
if skip_check_pattern.match(line):
file_omitted = True
break
if not file_omitted and not all(found_patterns):
problems.append(
'%s: Missing include guard \'%s\'' % (local_path, guard_macro))
if problems:
return [output_api.PresubmitError(
'You added one or more header files without an appropriate\n'
'include guard. Add the include guard {#ifndef,#define,#endif}\n'
'triplet or omit the check entirely through the magic comment:\n'
'"// PRESUBMIT_INTENTIONALLY_MISSING_INCLUDE_GUARD".', problems)]
else:
return []
# TODO(mstarzinger): Similar checking should be made available as part of
# tools/presubmit.py (note that tools/check-inline-includes.sh exists).
def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api):
@@ -230,44 +286,10 @@ def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
return []
def _CheckMissingFiles(input_api, output_api):
"""Runs verify_source_deps.py to ensure no files were added that are not in
GN.
"""
# We need to wait until we have an input_api object and use this
# roundabout construct to import checkdeps because this file is
# eval-ed and thus doesn't have __file__.
original_sys_path = sys.path
try:
sys.path = sys.path + [input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools')]
from verify_source_deps import missing_gn_files, missing_gyp_files
finally:
# Restore sys.path to what it was before.
sys.path = original_sys_path
gn_files = missing_gn_files()
gyp_files = missing_gyp_files()
results = []
if gn_files:
results.append(output_api.PresubmitError(
"You added one or more source files but didn't update the\n"
"corresponding BUILD.gn files:\n",
gn_files))
if gyp_files:
results.append(output_api.PresubmitError(
"You added one or more source files but didn't update the\n"
"corresponding gyp files:\n",
gyp_files))
return results
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(_CheckCommitMessageBugEntry(input_api, output_api))
results.extend(input_api.canned_checks.CheckOwners(
input_api, output_api, source_file_filter=None))
results.extend(input_api.canned_checks.CheckPatchFormatted(
input_api, output_api))
results.extend(input_api.canned_checks.CheckGenderNeutral(
@@ -276,9 +298,9 @@ def _CommonChecks(input_api, output_api):
results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
results.extend(_CheckHeadersHaveIncludeGuards(input_api, output_api))
results.extend(
_CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api))
results.extend(_CheckMissingFiles(input_api, output_api))
results.extend(_CheckJSONFiles(input_api, output_api))
results.extend(_CheckMacroUndefs(input_api, output_api))
results.extend(input_api.RunTests(

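The include-guard check added to PRESUBMIT.py above derives the expected macro from a header's path. A standalone Python sketch of that mapping, mirroring PathToGuardMacro (the sample paths are headers added elsewhere in this diff):

import re

def path_to_guard_macro(path):
    # Guards take the form V8_PATH_TO_FILE_WITHOUT_SRC_H_, mirroring
    # PathToGuardMacro in the PRESUBMIT.py hunk above.
    x = re.sub(r'^src/', 'v8_', path)
    x = re.sub(r'[-./]', '_', x)
    return x.upper() + '_'

print(path_to_guard_macro('src/heap/item-parallel-job.h'))
# -> V8_HEAP_ITEM_PARALLEL_JOB_H_
print(path_to_guard_macro('src/objects/js-promise-inl.h'))
# -> V8_OBJECTS_JS_PROMISE_INL_H_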
10
deps/v8/gni/v8.gni vendored

@@ -33,7 +33,9 @@ declare_args() {
# Enable the snapshot feature, for fast context creation.
# http://v8project.blogspot.com/2015/09/custom-startup-snapshots.html
v8_use_snapshot = true
# TODO(thakis): Make snapshots work in 64-bit win/cross builds,
# https://crbug.com/803591
v8_use_snapshot = !(is_win && host_os != "win" && target_cpu == "x64")
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
@@ -105,6 +107,12 @@ if (is_posix && (v8_enable_backtrace || v8_monolithic)) {
v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ]
}
# On MIPS gcc_target_rpath and ldso_path might be needed for all builds.
if (target_cpu == "mipsel" || target_cpu == "mips64el" ||
target_cpu == "mips" || target_cpu == "mips64") {
v8_add_configs += [ "//build/config/gcc:rpath_for_built_shared_libraries" ]
}
# All templates should be kept in sync.
template("v8_source_set") {
if (defined(invoker.split_count) && invoker.split_count > 1 &&

deps/v8/gypfiles/all.gyp vendored

@@ -8,46 +8,23 @@
'target_name': 'All',
'type': 'none',
'dependencies': [
'../src/d8.gyp:d8',
'../test/inspector/inspector.gyp:*',
'../test/mkgrokdump/mkgrokdump.gyp:*',
'd8.gyp:d8',
'inspector-test.gyp:*',
'mkgrokdump.gyp:*',
],
'conditions': [
['component!="shared_library"', {
'dependencies': [
'../tools/parser-shell.gyp:parser-shell',
'parser-shell.gyp:parser-shell',
],
}],
# These items don't compile for Android on Mac.
['host_os!="mac" or OS!="android"', {
'dependencies': [
'../samples/samples.gyp:*',
'../test/cctest/cctest.gyp:*',
'../test/fuzzer/fuzzer.gyp:*',
'../test/unittests/unittests.gyp:*',
],
}],
['test_isolation_mode != "noop"', {
'dependencies': [
'../test/bot_default.gyp:*',
'../test/benchmarks/benchmarks.gyp:*',
'../test/debugger/debugger.gyp:*',
'../test/default.gyp:*',
'../test/d8_default.gyp:*',
'../test/intl/intl.gyp:*',
'../test/message/message.gyp:*',
'../test/mjsunit/mjsunit.gyp:*',
'../test/mozilla/mozilla.gyp:*',
'../test/optimize_for_size.gyp:*',
'../test/perf.gyp:*',
'../test/preparser/preparser.gyp:*',
'../test/test262/test262.gyp:*',
'../test/webkit/webkit.gyp:*',
'../tools/check-static-initializers.gyp:*',
'../tools/gcmole/run_gcmole.gyp:*',
'../tools/jsfunfuzz/jsfunfuzz.gyp:*',
'../tools/run-deopt-fuzzer.gyp:*',
'../tools/run-num-fuzzer.gyp:*',
'samples.gyp:*',
'cctest.gyp:*',
'fuzzer.gyp:*',
'unittests.gyp:*',
],
}],
]

468
deps/v8/gypfiles/cctest.gyp vendored Normal file

@@ -0,0 +1,468 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'v8_code': 1,
'generated_file': '<(SHARED_INTERMEDIATE_DIR)/resources.cc',
'cctest_sources': [
'../test/cctest/compiler/c-signature.h',
'../test/cctest/compiler/call-tester.h',
'../test/cctest/compiler/codegen-tester.cc',
'../test/cctest/compiler/codegen-tester.h',
'../test/cctest/compiler/code-assembler-tester.h',
'../test/cctest/compiler/function-tester.cc',
'../test/cctest/compiler/function-tester.h',
'../test/cctest/compiler/graph-builder-tester.h',
'../test/cctest/compiler/test-basic-block-profiler.cc',
'../test/cctest/compiler/test-branch-combine.cc',
'../test/cctest/compiler/test-run-unwinding-info.cc',
'../test/cctest/compiler/test-gap-resolver.cc',
'../test/cctest/compiler/test-graph-visualizer.cc',
'../test/cctest/compiler/test-code-generator.cc',
'../test/cctest/compiler/test-code-assembler.cc',
'../test/cctest/compiler/test-instruction.cc',
'../test/cctest/compiler/test-js-context-specialization.cc',
'../test/cctest/compiler/test-js-constant-cache.cc',
'../test/cctest/compiler/test-js-typed-lowering.cc',
'../test/cctest/compiler/test-jump-threading.cc',
'../test/cctest/compiler/test-linkage.cc',
'../test/cctest/compiler/test-loop-analysis.cc',
'../test/cctest/compiler/test-machine-operator-reducer.cc',
'../test/cctest/compiler/test-multiple-return.cc',
'../test/cctest/compiler/test-node.cc',
'../test/cctest/compiler/test-operator.cc',
'../test/cctest/compiler/test-representation-change.cc',
'../test/cctest/compiler/test-run-bytecode-graph-builder.cc',
'../test/cctest/compiler/test-run-calls-to-external-references.cc',
'../test/cctest/compiler/test-run-deopt.cc',
'../test/cctest/compiler/test-run-intrinsics.cc',
'../test/cctest/compiler/test-run-jsbranches.cc',
'../test/cctest/compiler/test-run-jscalls.cc',
'../test/cctest/compiler/test-run-jsexceptions.cc',
'../test/cctest/compiler/test-run-jsobjects.cc',
'../test/cctest/compiler/test-run-jsops.cc',
'../test/cctest/compiler/test-run-load-store.cc',
'../test/cctest/compiler/test-run-machops.cc',
'../test/cctest/compiler/test-run-native-calls.cc',
'../test/cctest/compiler/test-run-retpoline.cc',
'../test/cctest/compiler/test-run-stackcheck.cc',
'../test/cctest/compiler/test-run-stubs.cc',
'../test/cctest/compiler/test-run-tail-calls.cc',
'../test/cctest/compiler/test-run-variables.cc',
'../test/cctest/compiler/test-run-wasm-machops.cc',
'../test/cctest/compiler/value-helper.cc',
'../test/cctest/compiler/value-helper.h',
'../test/cctest/cctest.cc',
'../test/cctest/cctest.h',
'../test/cctest/expression-type-collector-macros.h',
'../test/cctest/gay-fixed.cc',
'../test/cctest/gay-fixed.h',
'../test/cctest/gay-precision.cc',
'../test/cctest/gay-precision.h',
'../test/cctest/gay-shortest.cc',
'../test/cctest/gay-shortest.h',
'../test/cctest/heap/heap-tester.h',
'../test/cctest/heap/heap-utils.cc',
'../test/cctest/heap/heap-utils.h',
'../test/cctest/heap/test-alloc.cc',
'../test/cctest/heap/test-array-buffer-tracker.cc',
'../test/cctest/heap/test-compaction.cc',
'../test/cctest/heap/test-concurrent-marking.cc',
'../test/cctest/heap/test-embedder-tracing.cc',
'../test/cctest/heap/test-heap.cc',
'../test/cctest/heap/test-incremental-marking.cc',
'../test/cctest/heap/test-invalidated-slots.cc',
'../test/cctest/heap/test-lab.cc',
'../test/cctest/heap/test-mark-compact.cc',
'../test/cctest/heap/test-page-promotion.cc',
'../test/cctest/heap/test-spaces.cc',
'../test/cctest/interpreter/interpreter-tester.cc',
'../test/cctest/interpreter/interpreter-tester.h',
'../test/cctest/interpreter/source-position-matcher.cc',
'../test/cctest/interpreter/source-position-matcher.h',
'../test/cctest/interpreter/test-bytecode-generator.cc',
'../test/cctest/interpreter/test-interpreter.cc',
'../test/cctest/interpreter/test-interpreter-intrinsics.cc',
'../test/cctest/interpreter/test-source-positions.cc',
'../test/cctest/interpreter/bytecode-expectations-printer.cc',
'../test/cctest/interpreter/bytecode-expectations-printer.h',
'../test/cctest/libplatform/test-tracing.cc',
'../test/cctest/libsampler/test-sampler.cc',
'../test/cctest/parsing/test-parse-decision.cc',
'../test/cctest/parsing/test-preparser.cc',
'../test/cctest/parsing/test-scanner-streams.cc',
'../test/cctest/parsing/test-scanner.cc',
'../test/cctest/print-extension.cc',
'../test/cctest/print-extension.h',
'../test/cctest/profiler-extension.cc',
'../test/cctest/profiler-extension.h',
'../test/cctest/scope-test-helper.h',
'../test/cctest/setup-isolate-for-tests.cc',
'../test/cctest/setup-isolate-for-tests.h',
'../test/cctest/test-access-checks.cc',
'../test/cctest/test-accessor-assembler.cc',
'../test/cctest/test-accessors.cc',
'../test/cctest/test-allocation.cc',
'../test/cctest/test-api.cc',
'../test/cctest/test-api.h',
'../test/cctest/test-api-accessors.cc',
'../test/cctest/test-api-interceptors.cc',
'../test/cctest/test-array-list.cc',
'../test/cctest/test-atomicops.cc',
'../test/cctest/test-bignum.cc',
'../test/cctest/test-bignum-dtoa.cc',
'../test/cctest/test-bit-vector.cc',
'../test/cctest/test-circular-queue.cc',
'../test/cctest/test-code-layout.cc',
'../test/cctest/test-code-stub-assembler.cc',
'../test/cctest/test-compiler.cc',
'../test/cctest/test-constantpool.cc',
'../test/cctest/test-conversions.cc',
'../test/cctest/test-cpu-profiler.cc',
'../test/cctest/test-date.cc',
'../test/cctest/test-debug.cc',
'../test/cctest/test-decls.cc',
'../test/cctest/test-deoptimization.cc',
'../test/cctest/test-dictionary.cc',
'../test/cctest/test-diy-fp.cc',
'../test/cctest/test-double.cc',
'../test/cctest/test-dtoa.cc',
'../test/cctest/test-elements-kind.cc',
'../test/cctest/test-fast-dtoa.cc',
'../test/cctest/test-feedback-vector.cc',
'../test/cctest/test-feedback-vector.h',
'../test/cctest/test-field-type-tracking.cc',
'../test/cctest/test-fixed-dtoa.cc',
'../test/cctest/test-flags.cc',
'../test/cctest/test-func-name-inference.cc',
'../test/cctest/test-global-handles.cc',
'../test/cctest/test-global-object.cc',
'../test/cctest/test-hashcode.cc',
'../test/cctest/test-hashmap.cc',
'../test/cctest/test-heap-profiler.cc',
'../test/cctest/test-identity-map.cc',
'../test/cctest/test-intl.cc',
'../test/cctest/test-inobject-slack-tracking.cc',
'../test/cctest/test-isolate-independent-builtins.cc',
'../test/cctest/test-liveedit.cc',
'../test/cctest/test-lockers.cc',
'../test/cctest/test-log.cc',
'../test/cctest/test-managed.cc',
'../test/cctest/test-mementos.cc',
'../test/cctest/test-modules.cc',
'../test/cctest/test-object.cc',
'../test/cctest/test-orderedhashtable.cc',
'../test/cctest/test-parsing.cc',
'../test/cctest/test-platform.cc',
'../test/cctest/test-profile-generator.cc',
'../test/cctest/test-random-number-generator.cc',
'../test/cctest/test-regexp.cc',
'../test/cctest/test-representation.cc',
'../test/cctest/test-sampler-api.cc',
'../test/cctest/test-serialize.cc',
'../test/cctest/test-strings.cc',
'../test/cctest/test-symbols.cc',
'../test/cctest/test-strtod.cc',
'../test/cctest/test-thread-termination.cc',
'../test/cctest/test-threads.cc',
'../test/cctest/test-trace-event.cc',
'../test/cctest/test-traced-value.cc',
'../test/cctest/test-transitions.cc',
'../test/cctest/test-transitions.h',
'../test/cctest/test-typedarrays.cc',
'../test/cctest/test-types.cc',
'../test/cctest/test-unbound-queue.cc',
'../test/cctest/test-unboxed-doubles.cc',
'../test/cctest/test-unscopables-hidden-prototype.cc',
'../test/cctest/test-usecounters.cc',
'../test/cctest/test-utils.cc',
'../test/cctest/test-version.cc',
'../test/cctest/test-weakmaps.cc',
'../test/cctest/test-weaksets.cc',
'../test/cctest/trace-extension.cc',
'../test/cctest/trace-extension.h',
'../test/cctest/types-fuzz.h',
'../test/cctest/unicode-helpers.h',
'../test/cctest/wasm/test-c-wasm-entry.cc',
'../test/cctest/wasm/test-streaming-compilation.cc',
'../test/cctest/wasm/test-run-wasm.cc',
'../test/cctest/wasm/test-run-wasm-64.cc',
'../test/cctest/wasm/test-run-wasm-asmjs.cc',
'../test/cctest/wasm/test-run-wasm-atomics.cc',
'../test/cctest/wasm/test-run-wasm-interpreter.cc',
'../test/cctest/wasm/test-run-wasm-js.cc',
'../test/cctest/wasm/test-run-wasm-module.cc',
'../test/cctest/wasm/test-run-wasm-relocation.cc',
'../test/cctest/wasm/test-run-wasm-sign-extension.cc',
'../test/cctest/wasm/test-run-wasm-simd.cc',
'../test/cctest/wasm/test-wasm-breakpoints.cc',
"../test/cctest/wasm/test-wasm-codegen.cc",
'../test/cctest/wasm/test-wasm-interpreter-entry.cc',
'../test/cctest/wasm/test-wasm-stack.cc',
'../test/cctest/wasm/test-wasm-trap-position.cc',
'../test/cctest/wasm/wasm-run-utils.cc',
'../test/cctest/wasm/wasm-run-utils.h',
],
'cctest_sources_ia32': [
'../test/cctest/test-assembler-ia32.cc',
'../test/cctest/test-code-stubs.cc',
'../test/cctest/test-code-stubs.h',
'../test/cctest/test-code-stubs-ia32.cc',
'../test/cctest/test-disasm-ia32.cc',
'../test/cctest/test-log-stack-tracer.cc',
'../test/cctest/test-run-wasm-relocation-ia32.cc',
],
'cctest_sources_x64': [
'../test/cctest/test-assembler-x64.cc',
'../test/cctest/test-code-stubs.cc',
'../test/cctest/test-code-stubs.h',
'../test/cctest/test-code-stubs-x64.cc',
'../test/cctest/test-disasm-x64.cc',
'../test/cctest/test-macro-assembler-x64.cc',
'../test/cctest/test-log-stack-tracer.cc',
'../test/cctest/test-run-wasm-relocation-x64.cc',
],
'cctest_sources_arm': [
'../test/cctest/assembler-helper-arm.cc',
'../test/cctest/assembler-helper-arm.h',
'../test/cctest/test-assembler-arm.cc',
'../test/cctest/test-code-stubs.cc',
'../test/cctest/test-code-stubs.h',
'../test/cctest/test-code-stubs-arm.cc',
'../test/cctest/test-disasm-arm.cc',
'../test/cctest/test-macro-assembler-arm.cc',
'../test/cctest/test-run-wasm-relocation-arm.cc',
'../test/cctest/test-sync-primitives-arm.cc',
],
'cctest_sources_arm64': [
'../test/cctest/test-utils-arm64.cc',
'../test/cctest/test-utils-arm64.h',
'../test/cctest/test-assembler-arm64.cc',
'../test/cctest/test-code-stubs.cc',
'../test/cctest/test-code-stubs.h',
'../test/cctest/test-code-stubs-arm64.cc',
'../test/cctest/test-disasm-arm64.cc',
'../test/cctest/test-fuzz-arm64.cc',
'../test/cctest/test-javascript-arm64.cc',
'../test/cctest/test-js-arm64-variables.cc',
'../test/cctest/test-run-wasm-relocation-arm64.cc',
'../test/cctest/test-sync-primitives-arm64.cc',
],
'cctest_sources_s390': [
'../test/cctest/test-assembler-s390.cc',
'../test/cctest/test-code-stubs.cc',
'../test/cctest/test-code-stubs.h',
'../test/cctest/test-disasm-s390.cc',
],
'cctest_sources_ppc': [
'../test/cctest/test-assembler-ppc.cc',
'../test/cctest/test-code-stubs.cc',
'../test/cctest/test-code-stubs.h',
'../test/cctest/test-disasm-ppc.cc',
],
'cctest_sources_mips': [
'../test/cctest/test-assembler-mips.cc',
'../test/cctest/test-code-stubs.cc',
'../test/cctest/test-code-stubs.h',
'../test/cctest/test-code-stubs-mips.cc',
'../test/cctest/test-disasm-mips.cc',
'../test/cctest/test-macro-assembler-mips.cc',
],
'cctest_sources_mipsel': [
'../test/cctest/test-assembler-mips.cc',
'../test/cctest/test-code-stubs.cc',
'../test/cctest/test-code-stubs.h',
'../test/cctest/test-code-stubs-mips.cc',
'../test/cctest/test-disasm-mips.cc',
'../test/cctest/test-macro-assembler-mips.cc',
],
'cctest_sources_mips64': [
'../test/cctest/test-assembler-mips64.cc',
'../test/cctest/test-code-stubs.cc',
'../test/cctest/test-code-stubs.h',
'../test/cctest/test-code-stubs-mips64.cc',
'../test/cctest/test-disasm-mips64.cc',
'../test/cctest/test-macro-assembler-mips64.cc',
],
'cctest_sources_mips64el': [
'../test/cctest/test-assembler-mips64.cc',
'../test/cctest/test-code-stubs.cc',
'../test/cctest/test-code-stubs.h',
'../test/cctest/test-code-stubs-mips64.cc',
'../test/cctest/test-disasm-mips64.cc',
'../test/cctest/test-macro-assembler-mips64.cc',
],
},
'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'cctest',
'type': 'executable',
'dependencies': [
'resources',
'v8.gyp:v8_libbase',
'v8.gyp:v8_libplatform',
],
'include_dirs': [
'..',
],
'sources': [
'../test/common/wasm/flag-utils.h',
'../test/common/wasm/test-signatures.h',
'../test/common/wasm/wasm-macro-gen.h',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'<@(cctest_sources)',
'<(generated_file)',
],
'conditions': [
['v8_target_arch=="ia32"', {
'sources': [
'<@(cctest_sources_ia32)',
],
}],
['v8_target_arch=="x64"', {
'sources': [
'<@(cctest_sources_x64)',
],
}],
['v8_target_arch=="arm"', {
'sources': [
'<@(cctest_sources_arm)',
],
}],
['v8_target_arch=="arm64"', {
'sources': [
'<@(cctest_sources_arm64)',
],
}],
['v8_target_arch=="s390"', {
'sources': [
'<@(cctest_sources_s390)',
],
}],
['v8_target_arch=="s390x"', {
'sources': [
'<@(cctest_sources_s390)',
],
}],
['v8_target_arch=="ppc"', {
'sources': [
'<@(cctest_sources_ppc)',
],
}],
['v8_target_arch=="ppc64"', {
'sources': [
'<@(cctest_sources_ppc)',
],
}],
['v8_target_arch=="mips"', {
'sources': [
'<@(cctest_sources_mips)',
],
}],
['v8_target_arch=="mipsel"', {
'sources': [
'<@(cctest_sources_mipsel)',
],
}],
['v8_target_arch=="mips64"', {
'sources': [
'<@(cctest_sources_mips64)',
],
}],
['v8_target_arch=="mips64el"', {
'sources': [
'<@(cctest_sources_mips64el)',
],
}],
[ 'OS=="win"', {
'msvs_settings': {
'VCCLCompilerTool': {
# MSVS wants this for gay-{precision,shortest}.cc.
'AdditionalOptions': ['/bigobj'],
},
},
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64" \
or v8_target_arch=="arm" or v8_target_arch=="arm64" \
or v8_target_arch=="s390" or v8_target_arch=="s390x" \
or v8_target_arch=="mips" or v8_target_arch=="mips64" \
or v8_target_arch=="mipsel" or v8_target_arch=="mips64el"', {
# disable fmadd/fmsub so that expected results match generated code in
# RunFloat64MulAndFloat64Add1 and friends.
'cflags': ['-ffp-contract=off'],
}],
['OS=="aix"', {
'ldflags': [ '-Wl,-bbigtoc' ],
}],
['component=="shared_library"', {
# cctest can't be built against a shared library, so we need to
# depend on the underlying static target in that case.
'dependencies': ['v8.gyp:v8_maybe_snapshot'],
'defines': [ 'BUILDING_V8_SHARED', ]
}, {
'dependencies': ['v8.gyp:v8'],
}],
['v8_use_snapshot=="true"', {
'dependencies': ['v8.gyp:v8_initializers'],
}],
],
},
{
'target_name': 'resources',
'type': 'none',
'variables': {
'file_list': [
'../tools/splaytree.js',
'../tools/codemap.js',
'../tools/csvparser.js',
'../tools/consarray.js',
'../tools/profile.js',
'../tools/profile_view.js',
'../tools/arguments.js',
'../tools/logreader.js',
'../test/cctest/log-eq-of-logging-and-traversal.js',
],
},
'actions': [
{
'action_name': 'js2c',
'inputs': [
'../tools/js2c.py',
'<@(file_list)',
],
'outputs': [
'<(generated_file)',
],
'action': [
'python',
'../tools/js2c.py',
'<@(_outputs)',
'TEST', # type
'<@(file_list)',
],
}
],
},
{
'target_name': 'generate-bytecode-expectations',
'type': 'executable',
'dependencies': [
'v8.gyp:v8',
'v8.gyp:v8_libbase',
'v8.gyp:v8_libplatform',
],
'include_dirs+': [
'..',
],
'sources': [
'../test/cctest/interpreter/bytecode-expectations-printer.cc',
'../test/cctest/interpreter/bytecode-expectations-printer.h',
'../test/cctest/interpreter/generate-bytecode-expectations.cc',
],
},
],
}
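The '-ffp-contract=off' flag in the cctest target above is needed because a fused multiply-add (fmadd/fmsub) rounds once, while a separate multiply followed by an add rounds twice, so the two strategies can yield different doubles. A minimal Python sketch of the effect, with illustrative values and the fused path simulated through exact rationals:

    from fractions import Fraction

    x = 2.0 ** 27 + 1.0              # exactly representable double
    c = -(2.0 ** 54 + 2.0 ** 28)

    # Separate multiply/add: x*x is rounded to a double before the add.
    separate = x * x + c             # 0.0

    # Fused multiply-add: exact product, a single final rounding.
    fused = float(Fraction(x) * Fraction(x) + Fraction(c))  # 1.0

    print(separate, fused)

With contraction enabled the compiler may emit the fused form, which is why tests like RunFloat64MulAndFloat64Add1 only match when the flag forces the twice-rounded code path.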

deps/v8/gypfiles/coverage_wrapper.py vendored Executable file → Normal file

deps/v8/gypfiles/d8.gyp vendored

@ -1,29 +1,6 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
@ -32,7 +9,7 @@
'v8_enable_vtunejit%': 0,
'v8_enable_i18n_support%': 1,
},
'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'd8',
@ -48,10 +25,10 @@
'<(DEPTH)',
],
'sources': [
'd8.h',
'd8.cc',
'd8-console.h',
'd8-console.cc',
'../src/d8.h',
'../src/d8.cc',
'../src/d8-console.h',
'../src/d8-console.cc',
'<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
],
'conditions': [
@ -68,10 +45,10 @@
['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
or OS=="openbsd" or OS=="solaris" or OS=="android" \
or OS=="qnx" or OS=="aix")', {
'sources': [ 'd8-posix.cc', ]
'sources': [ '../src/d8-posix.cc', ]
}],
[ 'OS=="win"', {
'sources': [ 'd8-windows.cc', ]
'sources': [ '../src/d8-windows.cc', ]
}],
[ 'component!="shared_library"', {
'conditions': [
@ -86,7 +63,7 @@
}],
['v8_enable_vtunejit==1', {
'dependencies': [
'../src/third_party/vtune/v8vtune.gyp:v8_vtune',
'v8vtune.gyp:v8_vtune',
],
}],
['v8_enable_i18n_support==1', {
@ -107,8 +84,8 @@
'type': 'none',
'variables': {
'js_files': [
'd8.js',
'js/macros.py',
'../src/d8.js',
'../src/js/macros.py',
],
},
'conditions': [
@ -139,23 +116,4 @@
],
},
],
'conditions': [
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'd8_run',
'type': 'none',
'dependencies': [
'd8',
],
'includes': [
'../gypfiles/isolate.gypi',
],
'sources': [
'd8.isolate',
],
},
],
}],
],
}

deps/v8/gypfiles/features.gypi vendored

@ -87,6 +87,9 @@
# Enable concurrent marking.
'v8_enable_concurrent_marking%': 1,
# Enables various testing features.
'v8_enable_test_features%': 0,
# Controls the threshold for on-heap/off-heap Typed Arrays.
'v8_typed_array_max_size_in_heap%': 64,
@ -119,6 +122,9 @@
['v8_trace_maps==1', {
'defines': ['V8_TRACE_MAPS',],
}],
['v8_enable_test_features==1', {
'defines': ['V8_ENABLE_ALLOCATION_TIMEOUT', 'V8_ENABLE_FORCE_SLOW_PATH'],
}],
['v8_enable_verify_predictable==1', {
'defines': ['VERIFY_PREDICTABLE',],
}],
@ -164,7 +170,7 @@
}, # Debug
'Release': {
'variables': {
'v8_enable_handle_zapping%': 0,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_handle_zapping==1', {

deps/v8/gypfiles/fuzzer.gyp vendored

@ -6,7 +6,7 @@
'variables': {
'v8_code': 1,
},
'includes': ['../../gypfiles/toolchain.gypi', '../../gypfiles/features.gypi'],
'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'v8_simple_json_fuzzer',
@ -15,24 +15,24 @@
'json_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'json_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'json.cc',
'sources': [
'../test/fuzzer/json.cc',
],
},
{
@ -42,24 +42,52 @@
'parser_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'parser_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'parser.cc',
'sources': [
'../test/fuzzer/parser.cc',
],
},
{
'target_name': 'v8_simple_regexp_builtins_fuzzer',
'type': 'executable',
'dependencies': [
'regexp_builtins_fuzzer_lib',
],
'include_dirs': [
'..',
],
'sources': [
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'regexp_builtins_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'..',
],
'sources': [
'../test/fuzzer/regexp-builtins.cc',
'../test/fuzzer/regexp_builtins/mjsunit.js.h',
],
},
{
@ -69,24 +97,24 @@
'regexp_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'regexp_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'regexp.cc',
'sources': [
'../test/fuzzer/regexp.cc',
],
},
{
@ -96,27 +124,27 @@
'multi_return_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'multi_return_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'../compiler/c-signature.h',
'../compiler/call-helper.h',
'../compiler/raw-machine-assembler-tester.h',
'multi-return.cc',
'sources': [
'../test/cctest/compiler/c-signature.h',
'../test/cctest/compiler/call-helper.h',
'../test/cctest/compiler/raw-machine-assembler-tester.h',
'../test/fuzzer/multi-return.cc',
],
},
{
@ -126,28 +154,28 @@
'wasm_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'wasm.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'sources': [
'../test/fuzzer/wasm.cc',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@ -157,60 +185,28 @@
'wasm_async_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_async_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
],
'sources': [ ### gcmole(all) ###
'wasm-async.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
],
},
{
'target_name': 'v8_simple_wasm_call_fuzzer',
'type': 'executable',
'dependencies': [
'wasm_call_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
],
},
{
'target_name': 'wasm_call_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
],
'sources': [ ### gcmole(all) ###
'wasm-call.cc',
'../common/wasm/test-signatures.h',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'../test/fuzzer/wasm-async.cc',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@ -220,29 +216,29 @@
'wasm_code_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_code_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'wasm-code.cc',
'../common/wasm/test-signatures.h',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'sources': [
'../test/fuzzer/wasm-code.cc',
'../test/common/wasm/test-signatures.h',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@ -252,29 +248,29 @@
'wasm_compile_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_compile_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'wasm-compile.cc',
'../common/wasm/test-signatures.h',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'sources': [
'../test/fuzzer/wasm-compile.cc',
'../test/common/wasm/test-signatures.h',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@ -284,28 +280,28 @@
'wasm_data_section_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_data_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'wasm-data-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'sources': [
'../test/fuzzer/wasm-data-section.cc',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@ -315,28 +311,28 @@
'wasm_function_sigs_section_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_function_sigs_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'wasm-function-sigs-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'sources': [
'../test/fuzzer/wasm-function-sigs-section.cc',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@ -346,28 +342,28 @@
'wasm_globals_section_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_globals_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'wasm-globals-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'sources': [
'../test/fuzzer/wasm-globals-section.cc',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@ -377,28 +373,28 @@
'wasm_imports_section_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_imports_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'wasm-imports-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'sources': [
'../test/fuzzer/wasm-imports-section.cc',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@ -408,28 +404,28 @@
'wasm_memory_section_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_memory_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'wasm-memory-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'sources': [
'../test/fuzzer/wasm-memory-section.cc',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@ -439,28 +435,28 @@
'wasm_names_section_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_names_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'wasm-names-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'sources': [
'../test/fuzzer/wasm-names-section.cc',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
@ -470,44 +466,44 @@
'wasm_types_section_fuzzer_lib',
],
'include_dirs': [
'../..',
'..',
],
'sources': [
'fuzzer.cc',
'../test/fuzzer/fuzzer.cc',
],
},
{
'target_name': 'wasm_types_section_fuzzer_lib',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8_libplatform',
'fuzzer_support',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'wasm-types-section.cc',
'../common/wasm/wasm-module-runner.cc',
'../common/wasm/wasm-module-runner.h',
'wasm-fuzzer-common.cc',
'wasm-fuzzer-common.h',
'sources': [
'../test/fuzzer/wasm-types-section.cc',
'../test/common/wasm/wasm-module-runner.cc',
'../test/common/wasm/wasm-module-runner.h',
'../test/fuzzer/wasm-fuzzer-common.cc',
'../test/fuzzer/wasm-fuzzer-common.h',
],
},
{
'target_name': 'fuzzer_support',
'type': 'static_library',
'dependencies': [
'../../src/v8.gyp:v8',
'../../src/v8.gyp:v8_libbase',
'../../src/v8.gyp:v8_libplatform',
'v8.gyp:v8',
'v8.gyp:v8_libbase',
'v8.gyp:v8_libplatform',
],
'include_dirs': [
'../..',
'..',
],
'sources': [ ### gcmole(all) ###
'fuzzer-support.cc',
'fuzzer-support.h',
'sources': [
'../test/fuzzer/fuzzer-support.cc',
'../test/fuzzer/fuzzer-support.h',
],
'conditions': [
['v8_enable_i18n_support==1', {
@ -519,26 +515,4 @@
],
},
],
'conditions': [
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'fuzzer_run',
'type': 'none',
'dependencies': [
'v8_simple_json_fuzzer',
'v8_simple_parser_fuzzer',
'v8_simple_regexp_fuzzer',
'v8_simple_wasm_fuzzer',
],
'includes': [
'../../gypfiles/isolate.gypi',
],
'sources': [
'fuzzer.isolate',
],
},
],
}],
],
}

deps/v8/gypfiles/gmock.gyp vendored Normal file

@ -0,0 +1,72 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'gmock',
'type': 'static_library',
'dependencies': [
'gtest.gyp:gtest',
],
'sources': [
# Sources based on files in r173 of gmock.
'../testing/gmock/include/gmock/gmock-actions.h',
'../testing/gmock/include/gmock/gmock-cardinalities.h',
'../testing/gmock/include/gmock/gmock-generated-actions.h',
'../testing/gmock/include/gmock/gmock-generated-function-mockers.h',
'../testing/gmock/include/gmock/gmock-generated-matchers.h',
'../testing/gmock/include/gmock/gmock-generated-nice-strict.h',
'../testing/gmock/include/gmock/gmock-matchers.h',
'../testing/gmock/include/gmock/gmock-spec-builders.h',
'../testing/gmock/include/gmock/gmock.h',
'../testing/gmock/include/gmock/internal/gmock-generated-internal-utils.h',
'../testing/gmock/include/gmock/internal/gmock-internal-utils.h',
'../testing/gmock/include/gmock/internal/gmock-port.h',
'../testing/gmock/src/gmock-all.cc',
'../testing/gmock/src/gmock-cardinalities.cc',
'../testing/gmock/src/gmock-internal-utils.cc',
'../testing/gmock/src/gmock-matchers.cc',
'../testing/gmock/src/gmock-spec-builders.cc',
'../testing/gmock/src/gmock.cc',
'../testing/gmock-support.h', # gMock helpers
'../testing/gmock_custom/gmock/internal/custom/gmock-port.h',
],
'sources!': [
'../testing/gmock/src/gmock-all.cc', # Not needed by our build.
],
'include_dirs': [
'../testing/gmock_custom',
'../testing/gmock',
'../testing/gmock/include',
],
'all_dependent_settings': {
'include_dirs': [
'../testing/gmock_custom',
'../testing/gmock/include', # So that gmock headers can find themselves.
],
},
'export_dependent_settings': [
'gtest.gyp:gtest',
],
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
}, {
'toolsets': ['target'],
}],
],
},
{
'target_name': 'gmock_main',
'type': 'static_library',
'dependencies': [
'gmock',
],
'sources': [
'../testing/gmock/src/gmock_main.cc',
],
},
],
}

deps/v8/gypfiles/gtest.gyp vendored

@ -9,42 +9,42 @@
'toolsets': ['host', 'target'],
'type': 'static_library',
'sources': [
'gtest/include/gtest/gtest-death-test.h',
'gtest/include/gtest/gtest-message.h',
'gtest/include/gtest/gtest-param-test.h',
'gtest/include/gtest/gtest-printers.h',
'gtest/include/gtest/gtest-spi.h',
'gtest/include/gtest/gtest-test-part.h',
'gtest/include/gtest/gtest-typed-test.h',
'gtest/include/gtest/gtest.h',
'gtest/include/gtest/gtest_pred_impl.h',
'gtest/include/gtest/internal/gtest-death-test-internal.h',
'gtest/include/gtest/internal/gtest-filepath.h',
'gtest/include/gtest/internal/gtest-internal.h',
'gtest/include/gtest/internal/gtest-linked_ptr.h',
'gtest/include/gtest/internal/gtest-param-util-generated.h',
'gtest/include/gtest/internal/gtest-param-util.h',
'gtest/include/gtest/internal/gtest-port.h',
'gtest/include/gtest/internal/gtest-string.h',
'gtest/include/gtest/internal/gtest-tuple.h',
'gtest/include/gtest/internal/gtest-type-util.h',
'gtest/src/gtest-all.cc',
'gtest/src/gtest-death-test.cc',
'gtest/src/gtest-filepath.cc',
'gtest/src/gtest-internal-inl.h',
'gtest/src/gtest-port.cc',
'gtest/src/gtest-printers.cc',
'gtest/src/gtest-test-part.cc',
'gtest/src/gtest-typed-test.cc',
'gtest/src/gtest.cc',
'gtest-support.h',
'../testing/gtest/include/gtest/gtest-death-test.h',
'../testing/gtest/include/gtest/gtest-message.h',
'../testing/gtest/include/gtest/gtest-param-test.h',
'../testing/gtest/include/gtest/gtest-printers.h',
'../testing/gtest/include/gtest/gtest-spi.h',
'../testing/gtest/include/gtest/gtest-test-part.h',
'../testing/gtest/include/gtest/gtest-typed-test.h',
'../testing/gtest/include/gtest/gtest.h',
'../testing/gtest/include/gtest/gtest_pred_impl.h',
'../testing/gtest/include/gtest/internal/gtest-death-test-internal.h',
'../testing/gtest/include/gtest/internal/gtest-filepath.h',
'../testing/gtest/include/gtest/internal/gtest-internal.h',
'../testing/gtest/include/gtest/internal/gtest-linked_ptr.h',
'../testing/gtest/include/gtest/internal/gtest-param-util-generated.h',
'../testing/gtest/include/gtest/internal/gtest-param-util.h',
'../testing/gtest/include/gtest/internal/gtest-port.h',
'../testing/gtest/include/gtest/internal/gtest-string.h',
'../testing/gtest/include/gtest/internal/gtest-tuple.h',
'../testing/gtest/include/gtest/internal/gtest-type-util.h',
'../testing/gtest/src/gtest-all.cc',
'../testing/gtest/src/gtest-death-test.cc',
'../testing/gtest/src/gtest-filepath.cc',
'../testing/gtest/src/gtest-internal-inl.h',
'../testing/gtest/src/gtest-port.cc',
'../testing/gtest/src/gtest-printers.cc',
'../testing/gtest/src/gtest-test-part.cc',
'../testing/gtest/src/gtest-typed-test.cc',
'../testing/gtest/src/gtest.cc',
'../testing/gtest-support.h',
],
'sources!': [
'gtest/src/gtest-all.cc', # Not needed by our build.
'../testing/gtest/src/gtest-all.cc', # Not needed by our build.
],
'include_dirs': [
'gtest',
'gtest/include',
'../testing/gtest',
'../testing/gtest/include',
],
'dependencies': [
'gtest_prod',
@ -78,7 +78,7 @@
'UNIT_TEST',
],
'include_dirs': [
'gtest/include', # So that gtest headers can find themselves.
'../testing/gtest/include', # So that gtest headers can find themselves.
],
'target_conditions': [
['_type=="executable"', {
@ -107,7 +107,7 @@
'gtest',
],
'sources': [
'gtest/src/gtest_main.cc',
'../testing/gtest/src/gtest_main.cc',
],
},
{
@ -115,7 +115,7 @@
'toolsets': ['host', 'target'],
'type': 'none',
'sources': [
'gtest/include/gtest/gtest_prod.h',
'../testing/gtest/include/gtest/gtest_prod.h',
],
},
],

deps/v8/gypfiles/gyp_v8 vendored Executable file → Normal file

@ -48,7 +48,8 @@ import gyp
# Add paths so that pymod_do_main(...) can import files.
sys.path.insert(
1, os.path.abspath(os.path.join(v8_root, 'tools', 'generate_shim_headers')))
sys.path.append(
os.path.abspath(os.path.join(v8_root, 'third_party', 'binutils')))
def GetOutputDirectory():
"""Returns the output directory that GYP will use."""
@ -108,14 +109,19 @@ def run_gyp(args):
if __name__ == '__main__':
args = sys.argv[1:]
gyp_chromium_no_action = os.environ.get('GYP_CHROMIUM_NO_ACTION', '1')
if gyp_chromium_no_action != '0':
print 'GYP is now disabled by default.\n'
print 'If you really want to run this, set the environment variable '
print 'GYP_CHROMIUM_NO_ACTION=0.'
gyp_chromium_no_action = os.environ.get('GYP_CHROMIUM_NO_ACTION')
if gyp_chromium_no_action == '1':
print 'Skipping gyp_v8 due to GYP_CHROMIUM_NO_ACTION env var.'
sys.exit(0)
running_as_hook = '--running-as-hook'
if running_as_hook in args and gyp_chromium_no_action != '0':
print 'GYP is now disabled by default in runhooks.\n'
print 'If you really want to run this, either run '
print '`python gypfiles/gyp_v8` explicitly by hand '
print 'or set the environment variable GYP_CHROMIUM_NO_ACTION=0.'
sys.exit(0)
if running_as_hook in args:
args.remove(running_as_hook)
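The new gating in gyp_v8 above amounts to a small predicate: an explicit GYP_CHROMIUM_NO_ACTION=1 always skips gyp, a hook run skips unless the variable is explicitly 0, and a plain manual invocation proceeds. A runnable sketch of that decision table (the function name is illustrative):

    def should_run_gyp(env, args):
        no_action = env.get('GYP_CHROMIUM_NO_ACTION')
        if no_action == '1':
            return False                 # explicitly disabled
        if '--running-as-hook' in args and no_action != '0':
            return False                 # hook runs are opt-in only
        return True

    assert not should_run_gyp({}, ['--running-as-hook'])
    assert should_run_gyp({'GYP_CHROMIUM_NO_ACTION': '0'}, ['--running-as-hook'])
    assert should_run_gyp({}, [])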

deps/v8/gypfiles/inspector-test.gyp vendored Normal file

@ -0,0 +1,39 @@
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'v8_code': 1,
},
'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'inspector-test',
'type': 'executable',
'dependencies': [
'v8.gyp:v8_libplatform',
'v8.gyp:v8_libbase',
'v8.gyp:v8',
],
'include_dirs': [
'..',
],
'sources': [
'../test/inspector/inspector-test.cc',
'../test/inspector/isolate-data.cc',
'../test/inspector/isolate-data.h',
'../test/inspector/task-runner.cc',
'../test/inspector/task-runner.h',
],
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
],
}],
],
},
],
}

deps/v8/gypfiles/inspector.gyp vendored

@ -4,11 +4,12 @@
{
'variables': {
'protocol_path': '../../third_party/inspector_protocol',
'protocol_path': '../third_party/inspector_protocol',
'inspector_path': '../src/inspector',
},
'includes': [
'inspector.gypi',
'<(PRODUCT_DIR)/../../../third_party/inspector_protocol/inspector_protocol.gypi',
'../third_party/inspector_protocol/inspector_protocol.gypi',
],
'targets': [
{ 'target_name': 'inspector_injected_script',
@ -18,7 +19,7 @@
{
'action_name': 'convert_js_to_cpp_char_array',
'inputs': [
'build/xxd.py',
'<(inspector_path)/build/xxd.py',
'<(inspector_injected_script_source)',
],
'outputs': [
@ -26,9 +27,9 @@
],
'action': [
'python',
'build/xxd.py',
'<(inspector_path)/build/xxd.py',
'InjectedScriptSource_js',
'injected-script-source.js',
'<(inspector_path)/injected-script-source.js',
'<@(_outputs)'
],
},
@ -43,7 +44,7 @@
{
'action_name': 'protocol_compatibility',
'inputs': [
'js_protocol.json',
'<(inspector_path)/js_protocol.json',
],
'outputs': [
'<@(SHARED_INTERMEDIATE_DIR)/src/js_protocol.stamp',
@ -52,7 +53,7 @@
'python',
'<(protocol_path)/CheckProtocolCompatibility.py',
'--stamp', '<@(_outputs)',
'js_protocol.json',
'<(inspector_path)/js_protocol.json',
],
'message': 'Generating inspector protocol sources from protocol json definition',
},
@ -66,8 +67,8 @@
{
'action_name': 'protocol_generated_sources',
'inputs': [
'js_protocol.json',
'inspector_protocol_config.json',
'<(inspector_path)/js_protocol.json',
'<(inspector_path)/inspector_protocol_config.json',
'<@(inspector_protocol_files)',
],
'outputs': [
@ -76,9 +77,9 @@
'action': [
'python',
'<(protocol_path)/CodeGenerator.py',
'--jinja_dir', '../../third_party',
'--jinja_dir', '../third_party',
'--output_base', '<(SHARED_INTERMEDIATE_DIR)/src/inspector',
'--config', 'inspector_protocol_config.json',
'--config', '<(inspector_path)/inspector_protocol_config.json',
],
'message': 'Generating inspector protocol sources from protocol json',
},
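The convert_js_to_cpp_char_array action above invokes build/xxd.py to embed injected-script-source.js into the binary as a C++ char array named InjectedScriptSource_js. A rough sketch of the idea only, not V8's actual generator or its exact output format:

    def js_to_cpp_char_array(var_name, data):
        # Render bytes as a C++ array initializer, xxd -i style.
        body = ', '.join(str(b) for b in bytearray(data))
        return 'const char %s[] = { %s };\n' % (var_name, body)

    print(js_to_cpp_char_array('InjectedScriptSource_js', b'console.log(1)'))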

deps/v8/gypfiles/inspector.gypi vendored Normal file

@ -0,0 +1,90 @@
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'inspector_generated_sources': [
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Forward.h',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.cpp',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Protocol.h',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.cpp',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Console.h',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.cpp',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Debugger.h',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.cpp',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/HeapProfiler.h',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.cpp',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Profiler.h',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.cpp',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Runtime.h',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.cpp',
'<(SHARED_INTERMEDIATE_DIR)/src/inspector/protocol/Schema.h',
'<(SHARED_INTERMEDIATE_DIR)/include/inspector/Debugger.h',
'<(SHARED_INTERMEDIATE_DIR)/include/inspector/Runtime.h',
'<(SHARED_INTERMEDIATE_DIR)/include/inspector/Schema.h',
],
'inspector_injected_script_source': '../src/inspector/injected-script-source.js',
'inspector_generated_injected_script': '<(SHARED_INTERMEDIATE_DIR)/src/inspector/injected-script-source.h',
'inspector_all_sources': [
'<@(inspector_generated_sources)',
'<(inspector_generated_injected_script)',
'../include/v8-inspector.h',
'../include/v8-inspector-protocol.h',
'../src/inspector/injected-script.cc',
'../src/inspector/injected-script.h',
'../src/inspector/inspected-context.cc',
'../src/inspector/inspected-context.h',
'../src/inspector/remote-object-id.cc',
'../src/inspector/remote-object-id.h',
'../src/inspector/search-util.cc',
'../src/inspector/search-util.h',
'../src/inspector/string-16.cc',
'../src/inspector/string-16.h',
'../src/inspector/string-util.cc',
'../src/inspector/string-util.h',
'../src/inspector/test-interface.cc',
'../src/inspector/test-interface.h',
'../src/inspector/v8-console.cc',
'../src/inspector/v8-console.h',
'../src/inspector/v8-console-agent-impl.cc',
'../src/inspector/v8-console-agent-impl.h',
'../src/inspector/v8-console-message.cc',
'../src/inspector/v8-console-message.h',
'../src/inspector/v8-debugger.cc',
'../src/inspector/v8-debugger.h',
'../src/inspector/v8-debugger-agent-impl.cc',
'../src/inspector/v8-debugger-agent-impl.h',
'../src/inspector/v8-debugger-script.cc',
'../src/inspector/v8-debugger-script.h',
'../src/inspector/v8-function-call.cc',
'../src/inspector/v8-function-call.h',
'../src/inspector/v8-heap-profiler-agent-impl.cc',
'../src/inspector/v8-heap-profiler-agent-impl.h',
'../src/inspector/v8-injected-script-host.cc',
'../src/inspector/v8-injected-script-host.h',
'../src/inspector/v8-inspector-impl.cc',
'../src/inspector/v8-inspector-impl.h',
'../src/inspector/v8-inspector-session-impl.cc',
'../src/inspector/v8-inspector-session-impl.h',
'../src/inspector/v8-internal-value-type.cc',
'../src/inspector/v8-internal-value-type.h',
'../src/inspector/v8-profiler-agent-impl.cc',
'../src/inspector/v8-profiler-agent-impl.h',
'../src/inspector/v8-regex.cc',
'../src/inspector/v8-regex.h',
'../src/inspector/v8-runtime-agent-impl.cc',
'../src/inspector/v8-runtime-agent-impl.h',
'../src/inspector/v8-schema-agent-impl.cc',
'../src/inspector/v8-schema-agent-impl.h',
'../src/inspector/v8-stack-trace-impl.cc',
'../src/inspector/v8-stack-trace-impl.h',
'../src/inspector/v8-value-utils.cc',
'../src/inspector/v8-value-utils.h',
'../src/inspector/wasm-translation.cc',
'../src/inspector/wasm-translation.h',
]
}
}

deps/v8/gypfiles/isolate.gypi vendored

@ -1,100 +0,0 @@
# Copyright 2015 the V8 project authors. All rights reserved.
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This file is meant to be included into a target to provide a rule
# to "build" .isolate files into a .isolated file.
#
# To use this, create a gyp target with the following form:
# 'conditions': [
# ['test_isolation_mode != "noop"', {
# 'targets': [
# {
# 'target_name': 'foo_test_run',
# 'type': 'none',
# 'dependencies': [
# 'foo_test',
# ],
# 'includes': [
# '../gypfiles/isolate.gypi',
# ],
# 'sources': [
# 'foo_test.isolate',
# ],
# },
# ],
# }],
# ],
#
# Note: foo_test.isolate is included and a source file. It is an inherent
# property of the .isolate format. This permits to define GYP variables but is
# a stricter format than GYP so isolate.py can read it.
#
# The generated .isolated file will be:
# <(PRODUCT_DIR)/foo_test.isolated
#
# See http://dev.chromium.org/developers/testing/isolated-testing/for-swes
# for more information.
{
'rules': [
{
'rule_name': 'isolate',
'extension': 'isolate',
'inputs': [
# Files that are known to be involved in this step.
'<(DEPTH)/tools/isolate_driver.py',
'<(DEPTH)/tools/swarming_client/isolate.py',
'<(DEPTH)/tools/swarming_client/run_isolated.py',
],
'outputs': [
'<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).isolated',
],
'action': [
'python',
'<(DEPTH)/tools/isolate_driver.py',
'<(test_isolation_mode)',
'--isolated', '<(PRODUCT_DIR)/<(RULE_INPUT_ROOT).isolated',
'--isolate', '<(RULE_INPUT_PATH)',
# Variables should use the -V FOO=<(FOO) form so frequent values,
# like '0' or '1', aren't stripped out by GYP. Run 'isolate.py help'
# for more details.
# Path variables are used to replace file paths when loading a .isolate
# file
'--path-variable', 'DEPTH', '<(DEPTH)',
'--path-variable', 'PRODUCT_DIR', '<(PRODUCT_DIR)',
'--config-variable', 'CONFIGURATION_NAME=<(CONFIGURATION_NAME)',
'--config-variable', 'OS=<(OS)',
'--config-variable', 'asan=<(asan)',
'--config-variable', 'cfi_vptr=<(cfi_vptr)',
'--config-variable', 'gcmole=<(gcmole)',
'--config-variable', 'has_valgrind=<(has_valgrind)',
'--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)',
'--config-variable', 'msan=<(msan)',
'--config-variable', 'tsan=<(tsan)',
'--config-variable', 'coverage=<(coverage)',
'--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
'--config-variable', 'component=<(component)',
'--config-variable', 'target_arch=<(target_arch)',
'--config-variable', 'ubsan_vptr=0',
'--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
'--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
],
'conditions': [
['OS=="win"', {
'action': [
'--config-variable', 'msvs_version=2013',
],
}, {
'action': [
'--config-variable', 'msvs_version=0',
],
}],
],
},
],
}

deps/v8/gypfiles/landmine_utils.py vendored

@ -1,123 +0,0 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import functools
import logging
import os
import shlex
import sys
def memoize(default=None):
"""This decorator caches the return value of a parameterless pure function"""
def memoizer(func):
val = []
@functools.wraps(func)
def inner():
if not val:
ret = func()
val.append(ret if ret is not None else default)
if logging.getLogger().isEnabledFor(logging.INFO):
print '%s -> %r' % (func.__name__, val[0])
return val[0]
return inner
return memoizer
@memoize()
def IsWindows():
return sys.platform in ['win32', 'cygwin']
@memoize()
def IsLinux():
return sys.platform.startswith(('linux', 'freebsd'))
@memoize()
def IsMac():
return sys.platform == 'darwin'
@memoize()
def gyp_defines():
"""Parses and returns GYP_DEFINES env var as a dictionary."""
return dict(arg.split('=', 1)
for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
@memoize()
def gyp_generator_flags():
"""Parses and returns GYP_GENERATOR_FLAGS env var as a dictionary."""
return dict(arg.split('=', 1)
for arg in shlex.split(os.environ.get('GYP_GENERATOR_FLAGS', '')))
@memoize()
def gyp_msvs_version():
return os.environ.get('GYP_MSVS_VERSION', '')
@memoize()
def distributor():
"""
Returns a string which is the distributed build engine in use (if any).
Possible values: 'goma', 'ib', ''
"""
if 'goma' in gyp_defines():
return 'goma'
elif IsWindows():
if 'CHROME_HEADLESS' in os.environ:
return 'ib' # use (win and !goma and headless) as approximation of ib
@memoize()
def platform():
"""
Returns a string representing the platform this build is targeted for.
Possible values: 'win', 'mac', 'linux', 'ios', 'android'
"""
if 'OS' in gyp_defines():
if 'android' in gyp_defines()['OS']:
return 'android'
else:
return gyp_defines()['OS']
elif IsWindows():
return 'win'
elif IsLinux():
return 'linux'
else:
return 'mac'
@memoize()
def builder():
"""
Returns a string representing the build engine (not compiler) to use.
Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons'
"""
if 'GYP_GENERATORS' in os.environ:
# for simplicity, only support the first explicit generator
generator = os.environ['GYP_GENERATORS'].split(',')[0]
if generator.endswith('-android'):
return generator.split('-')[0]
elif generator.endswith('-ninja'):
return 'ninja'
else:
return generator
else:
if platform() == 'android':
# Good enough for now? Do any android bots use make?
return 'make'
elif platform() == 'ios':
return 'xcode'
elif IsWindows():
return 'msvs'
elif IsLinux():
return 'make'
elif IsMac():
return 'xcode'
else:
assert False, 'Don\'t know what builder we\'re using!'
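The memoize() helper deleted above caches a zero-argument function's result after the first call, substituting default when the function returns None. A self-contained usage sketch of the same decorator (logging omitted; the expensive() example is illustrative):

    import functools

    def memoize(default=None):
        """Cache the return value of a parameterless pure function."""
        def memoizer(func):
            val = []
            @functools.wraps(func)
            def inner():
                if not val:
                    ret = func()
                    val.append(ret if ret is not None else default)
                return val[0]
            return inner
        return memoizer

    calls = []

    @memoize()
    def expensive():
        calls.append(1)
        return 'computed'

    print(expensive())   # 'computed' -- runs the function
    print(expensive())   # 'computed' -- served from the cache
    print(len(calls))    # 1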

deps/v8/gypfiles/landmines.py vendored

@ -1,245 +0,0 @@
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script runs every build as the first hook (See DEPS). If it detects that
the build should be clobbered, it will delete the contents of the build
directory.
A landmine is tripped when a builder checks out a different revision, and the
diff between the new landmines and the old ones is non-null. At this point, the
build is clobbered.
"""
import difflib
import errno
import gyp_environment
import logging
import optparse
import os
import re
import shutil
import sys
import subprocess
import time
import landmine_utils
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_build_dir(build_tool, is_iphone=False):
"""
Returns output directory absolute path dependent on build and targets.
Examples:
r'c:\b\build\slave\win\build\src\out'
'/mnt/data/b/build/slave/linux/build/src/out'
'/b/build/slave/ios_rel_device/build/src/xcodebuild'
Keep this function in sync with tools/build/scripts/slave/compile.py
"""
ret = None
if build_tool == 'xcode':
ret = os.path.join(SRC_DIR, 'xcodebuild')
elif build_tool in ['make', 'ninja', 'ninja-ios']: # TODO: Remove ninja-ios.
if 'CHROMIUM_OUT_DIR' in os.environ:
output_dir = os.environ.get('CHROMIUM_OUT_DIR').strip()
if not output_dir:
raise Error('CHROMIUM_OUT_DIR environment variable is set but blank!')
else:
output_dir = landmine_utils.gyp_generator_flags().get('output_dir', 'out')
ret = os.path.join(SRC_DIR, output_dir)
elif build_tool in ['msvs', 'vs', 'ib']:
ret = os.path.join(SRC_DIR, 'build')
else:
raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
return os.path.abspath(ret)
def extract_gn_build_commands(build_ninja_file):
"""Extracts from a build.ninja the commands to run GN.
The commands to run GN are the gn rule and build.ninja build step at the
top of the build.ninja file. We want to keep these when deleting GN builds
since we want to preserve the command-line flags to GN.
On error, returns the empty string."""
result = ""
with open(build_ninja_file, 'r') as f:
# Read until the second blank line. The first thing GN writes to the file
# is the "rule gn" and the second is the section for "build build.ninja",
# separated by blank lines.
num_blank_lines = 0
while num_blank_lines < 2:
line = f.readline()
if len(line) == 0:
return '' # Unexpected EOF.
result += line
if line[0] == '\n':
num_blank_lines = num_blank_lines + 1
return result
def delete_build_dir(build_dir):
# GN writes a build.ninja.d file. Note that not all GN builds have args.gn.
build_ninja_d_file = os.path.join(build_dir, 'build.ninja.d')
if not os.path.exists(build_ninja_d_file):
shutil.rmtree(build_dir)
return
# GN builds aren't automatically regenerated when you sync. To avoid
# messing with the GN workflow, erase everything but the args file, and
# write a dummy build.ninja file that will automatically rerun GN the next
# time Ninja is run.
build_ninja_file = os.path.join(build_dir, 'build.ninja')
build_commands = extract_gn_build_commands(build_ninja_file)
try:
gn_args_file = os.path.join(build_dir, 'args.gn')
with open(gn_args_file, 'r') as f:
args_contents = f.read()
except IOError:
args_contents = ''
shutil.rmtree(build_dir)
# Put back the args file (if any).
os.mkdir(build_dir)
if args_contents != '':
with open(gn_args_file, 'w') as f:
f.write(args_contents)
# Write the build.ninja file sufficiently to regenerate itself.
with open(os.path.join(build_dir, 'build.ninja'), 'w') as f:
if build_commands != '':
f.write(build_commands)
else:
# Couldn't parse the build.ninja file, write a default thing.
f.write('''rule gn
command = gn -q gen //out/%s/
description = Regenerating ninja files
build build.ninja: gn
generator = 1
depfile = build.ninja.d
''' % (os.path.split(build_dir)[1]))
# Write a .d file for the build which references a nonexistant file. This
# will make Ninja always mark the build as dirty.
with open(build_ninja_d_file, 'w') as f:
f.write('build.ninja: nonexistant_file.gn\n')
def needs_clobber(landmines_path, new_landmines):
if os.path.exists(landmines_path):
with open(landmines_path, 'r') as f:
old_landmines = f.readlines()
if old_landmines != new_landmines:
old_date = time.ctime(os.stat(landmines_path).st_ctime)
diff = difflib.unified_diff(old_landmines, new_landmines,
fromfile='old_landmines', tofile='new_landmines',
fromfiledate=old_date, tofiledate=time.ctime(), n=0)
sys.stdout.write('Clobbering due to:\n')
sys.stdout.writelines(diff)
return True
else:
sys.stdout.write('Clobbering due to missing landmines file.\n')
return True
return False
def clobber_if_necessary(new_landmines):
"""Does the work of setting, planting, and triggering landmines."""
out_dir = get_build_dir(landmine_utils.builder())
landmines_path = os.path.normpath(os.path.join(out_dir, '..', '.landmines'))
try:
os.makedirs(out_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
if needs_clobber(landmines_path, new_landmines):
# Clobber contents of build directory but not directory itself: some
# checkouts have the build directory mounted.
for f in os.listdir(out_dir):
path = os.path.join(out_dir, f)
if os.path.basename(out_dir) == 'build':
# Only delete build directories and files for MSVS builds as the folder
# shares some checked out files and directories.
if (os.path.isdir(path) and
re.search(r'(?:[Rr]elease)|(?:[Dd]ebug)', f)):
delete_build_dir(path)
elif (os.path.isfile(path) and
(path.endswith('.sln') or
path.endswith('.vcxproj') or
path.endswith('.vcxproj.user'))):
os.unlink(path)
else:
if os.path.isfile(path):
os.unlink(path)
elif os.path.isdir(path):
delete_build_dir(path)
if os.path.basename(out_dir) == 'xcodebuild':
# Xcodebuild puts an additional project file structure into build,
# while the output folder is xcodebuild.
project_dir = os.path.join(SRC_DIR, 'build', 'all.xcodeproj')
if os.path.exists(project_dir) and os.path.isdir(project_dir):
delete_build_dir(project_dir)
# Save current set of landmines for next time.
with open(landmines_path, 'w') as f:
f.writelines(new_landmines)
def process_options():
"""Returns a list of landmine emitting scripts."""
parser = optparse.OptionParser()
parser.add_option(
'-s', '--landmine-scripts', action='append',
default=[os.path.join(SRC_DIR, 'gypfiles', 'get_landmines.py')],
help='Path to the script which emits landmines to stdout. The target '
'is passed to this script via option -t. Note that an extra '
'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
parser.add_option('-v', '--verbose', action='store_true',
default=('LANDMINES_VERBOSE' in os.environ),
help=('Emit some extra debugging information (default off). This option '
'is also enabled by the presence of a LANDMINES_VERBOSE environment '
'variable.'))
options, args = parser.parse_args()
if args:
parser.error('Unknown arguments %s' % args)
logging.basicConfig(
level=logging.DEBUG if options.verbose else logging.ERROR)
extra_script = os.environ.get('EXTRA_LANDMINES_SCRIPT')
if extra_script:
return options.landmine_scripts + [extra_script]
else:
return options.landmine_scripts
def main():
landmine_scripts = process_options()
if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
return 0
gyp_environment.set_environment()
landmines = []
for s in landmine_scripts:
proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE)
output, _ = proc.communicate()
landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
clobber_if_necessary(landmines)
return 0
if __name__ == '__main__':
sys.exit(main())

deps/v8/gypfiles/mkgrokdump.gyp vendored Normal file

@ -0,0 +1,27 @@
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'v8_code': 1,
},
'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'mkgrokdump',
'type': 'executable',
'dependencies': [
'v8.gyp:v8',
'v8.gyp:v8_libbase',
'v8.gyp:v8_libplatform',
],
'include_dirs': [
'..',
],
'sources': [
'../test/mkgrokdump/mkgrokdump.cc',
],
},
],
}

deps/v8/gypfiles/parser-shell.gyp vendored

@ -30,15 +30,15 @@
'v8_code': 1,
'v8_enable_i18n_support%': 1,
},
'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'parser-shell',
'type': 'executable',
'dependencies': [
'../src/v8.gyp:v8',
'../src/v8.gyp:v8_libbase',
'../src/v8.gyp:v8_libplatform',
'v8.gyp:v8',
'v8.gyp:v8_libbase',
'v8.gyp:v8_libplatform',
],
'conditions': [
['v8_enable_i18n_support==1', {
@ -52,8 +52,8 @@
'..',
],
'sources': [
'parser-shell.cc',
'shell-utils.h',
'../tools/parser-shell.cc',
'../tools/shell-utils.h',
],
},
],

deps/v8/gypfiles/run-tests-legacy.py vendored Executable file → Normal file

deps/v8/gypfiles/samples.gyp vendored Normal file

@ -0,0 +1,61 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'v8_code': 1,
'v8_enable_i18n_support%': 1,
'v8_toolset_for_shell%': 'target',
},
'includes': ['toolchain.gypi', 'features.gypi'],
'target_defaults': {
'type': 'executable',
'dependencies': [
'v8.gyp:v8',
'v8.gyp:v8_libbase',
'v8.gyp:v8_libplatform',
],
'include_dirs': [
'..',
],
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icudata',
],
}],
],
},
'targets': [
{
'target_name': 'v8_shell',
'sources': [
'../samples/shell.cc',
],
'conditions': [
[ 'want_separate_host_toolset==1', {
'toolsets': [ '<(v8_toolset_for_shell)', ],
}],
],
},
{
'target_name': 'hello-world',
'sources': [
'../samples/hello-world.cc',
],
},
{
'target_name': 'process',
'sources': [
'../samples/process.cc',
],
},
],
}

deps/v8/gypfiles/sysroot_ld_flags.sh vendored Executable file → Normal file

deps/v8/gypfiles/toolchain.gypi vendored

@ -58,6 +58,9 @@
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
# MIPS MSA support
'mips_use_msa%': 0,
# Print to stdout on Android.
'v8_android_log_stdout%': 0,
@ -439,6 +442,9 @@
'cflags': ['-mips32r6'],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r6" and mips_use_msa==1', {
'defines': [ '_MIPS_MSA' ],
}],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
@ -507,6 +513,9 @@
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r6" and mips_use_msa==1', {
'defines': [ '_MIPS_MSA' ],
}],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
@ -558,6 +567,9 @@
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r6" and mips_use_msa==1', {
'defines': [ '_MIPS_MSA' ],
}],
['mips_arch_variant=="r2"', {
'conditions': [
['mips_fpu_mode=="fp64"', {
@ -640,6 +652,9 @@
'cflags': ['-mips32r6'],
'ldflags': ['-mips32r6'],
}],
['mips_arch_variant=="r6" and mips_use_msa==1', {
'defines': [ '_MIPS_MSA' ],
}],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
@ -721,6 +736,9 @@
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r6" and mips_use_msa==1', {
'defines': [ '_MIPS_MSA' ],
}],
['mips_arch_variant=="r2"', {
'conditions': [
[ 'mips_fpu_mode=="fp64"', {
@ -778,6 +796,9 @@
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r6" and mips_use_msa==1', {
'defines': [ '_MIPS_MSA' ],
}],
['mips_arch_variant=="r2"', {
'conditions': [
['mips_fpu_mode=="fp64"', {
@ -877,6 +898,9 @@
'cflags': ['-mips64r6', '-mabi=64'],
'ldflags': ['-mips64r6', '-mabi=64'],
}],
['mips_arch_variant=="r6" and mips_use_msa==1', {
'defines': [ '_MIPS_MSA' ],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
'conditions': [
@ -895,6 +919,9 @@
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
}],
['mips_arch_variant=="r6" and mips_use_msa==1', {
'defines': [ '_MIPS_MSA' ],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
}],
@ -907,6 +934,9 @@
['mips_arch_variant=="r6"', {
'defines': ['_MIPS_ARCH_MIPS64R6',],
}],
['mips_arch_variant=="r6" and mips_use_msa==1', {
'defines': [ '_MIPS_MSA' ],
}],
['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS64R2',],
}],
@ -1225,7 +1255,9 @@
'OBJECT_PRINT',
'VERIFY_HEAP',
'DEBUG',
'V8_TRACE_MAPS'
'V8_TRACE_MAPS',
'V8_ENABLE_ALLOCATION_TIMEOUT',
'V8_ENABLE_FORCE_SLOW_PATH',
],
'conditions': [
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
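Each of the _MIPS_MSA additions above follows the same gyp pattern: the condition string is evaluated as a Python expression against the current variables, and the attached dict is merged in when it evaluates true. A minimal sketch of those semantics (illustrative; real gyp also performs variable expansion and supports nested conditions):

    def eval_gyp_condition(cond, variables):
        # gyp condition strings are Python expressions over the variables.
        return bool(eval(cond, {'__builtins__': {}}, dict(variables)))

    variables = {'mips_arch_variant': 'r6', 'mips_use_msa': 1}
    cond = 'mips_arch_variant=="r6" and mips_use_msa==1'
    defines = ['_MIPS_MSA'] if eval_gyp_condition(cond, variables) else []
    print(defines)   # ['_MIPS_MSA']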

deps/v8/gypfiles/unittests.gyp vendored Normal file

@ -0,0 +1,287 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# The sources are kept automatically in sync with BUILD.gn.
{
'variables': {
'v8_code': 1,
'unittests_sources': [
'../test/unittests/allocation-unittest.cc',
'../test/unittests/api/access-check-unittest.cc',
'../test/unittests/api/exception-unittest.cc',
'../test/unittests/api/interceptor-unittest.cc',
'../test/unittests/api/isolate-unittest.cc',
'../test/unittests/api/remote-object-unittest.cc',
'../test/unittests/api/v8-object-unittest.cc',
'../test/unittests/asmjs/asm-scanner-unittest.cc',
'../test/unittests/asmjs/asm-types-unittest.cc',
'../test/unittests/asmjs/switch-logic-unittest.cc',
'../test/unittests/base/atomic-utils-unittest.cc',
'../test/unittests/base/bits-unittest.cc',
'../test/unittests/base/cpu-unittest.cc',
'../test/unittests/base/division-by-constant-unittest.cc',
'../test/unittests/base/flags-unittest.cc',
'../test/unittests/base/functional-unittest.cc',
'../test/unittests/base/ieee754-unittest.cc',
'../test/unittests/base/logging-unittest.cc',
'../test/unittests/base/macros-unittest.cc',
'../test/unittests/base/iterator-unittest.cc',
'../test/unittests/base/ostreams-unittest.cc',
'../test/unittests/base/platform/condition-variable-unittest.cc',
'../test/unittests/base/platform/mutex-unittest.cc',
'../test/unittests/base/platform/platform-unittest.cc',
'../test/unittests/base/platform/semaphore-unittest.cc',
'../test/unittests/base/platform/time-unittest.cc',
'../test/unittests/base/sys-info-unittest.cc',
'../test/unittests/base/template-utils-unittest.cc',
'../test/unittests/base/utils/random-number-generator-unittest.cc',
'../test/unittests/bigint-unittest.cc',
'../test/unittests/cancelable-tasks-unittest.cc',
'../test/unittests/char-predicates-unittest.cc',
"../test/unittests/code-stub-assembler-unittest.cc",
"../test/unittests/code-stub-assembler-unittest.h",
'../test/unittests/compiler/branch-elimination-unittest.cc',
'../test/unittests/compiler/bytecode-analysis-unittest.cc',
'../test/unittests/compiler/checkpoint-elimination-unittest.cc',
"../test/unittests/compiler/code-assembler-unittest.cc",
"../test/unittests/compiler/code-assembler-unittest.h",
'../test/unittests/compiler/common-operator-reducer-unittest.cc',
'../test/unittests/compiler/common-operator-unittest.cc',
'../test/unittests/compiler/compiler-test-utils.h',
'../test/unittests/compiler/control-equivalence-unittest.cc',
'../test/unittests/compiler/control-flow-optimizer-unittest.cc',
'../test/unittests/compiler/dead-code-elimination-unittest.cc',
'../test/unittests/compiler/diamond-unittest.cc',
'../test/unittests/compiler/effect-control-linearizer-unittest.cc',
'../test/unittests/compiler/graph-reducer-unittest.cc',
'../test/unittests/compiler/graph-reducer-unittest.h',
'../test/unittests/compiler/graph-trimmer-unittest.cc',
'../test/unittests/compiler/graph-unittest.cc',
'../test/unittests/compiler/graph-unittest.h',
'../test/unittests/compiler/instruction-unittest.cc',
'../test/unittests/compiler/instruction-selector-unittest.cc',
'../test/unittests/compiler/instruction-selector-unittest.h',
'../test/unittests/compiler/instruction-sequence-unittest.cc',
'../test/unittests/compiler/instruction-sequence-unittest.h',
'../test/unittests/compiler/int64-lowering-unittest.cc',
'../test/unittests/compiler/js-builtin-reducer-unittest.cc',
'../test/unittests/compiler/js-create-lowering-unittest.cc',
'../test/unittests/compiler/js-intrinsic-lowering-unittest.cc',
'../test/unittests/compiler/js-operator-unittest.cc',
'../test/unittests/compiler/js-typed-lowering-unittest.cc',
'../test/unittests/compiler/linkage-tail-call-unittest.cc',
'../test/unittests/compiler/live-range-builder.h',
'../test/unittests/compiler/regalloc/live-range-unittest.cc',
'../test/unittests/compiler/load-elimination-unittest.cc',
'../test/unittests/compiler/loop-peeling-unittest.cc',
'../test/unittests/compiler/machine-operator-reducer-unittest.cc',
'../test/unittests/compiler/machine-operator-unittest.cc',
'../test/unittests/compiler/regalloc/move-optimizer-unittest.cc',
'../test/unittests/compiler/node-cache-unittest.cc',
'../test/unittests/compiler/node-matchers-unittest.cc',
'../test/unittests/compiler/node-properties-unittest.cc',
'../test/unittests/compiler/node-test-utils.cc',
'../test/unittests/compiler/node-test-utils.h',
'../test/unittests/compiler/node-unittest.cc',
'../test/unittests/compiler/opcodes-unittest.cc',
'../test/unittests/compiler/persistent-unittest.cc',
'../test/unittests/compiler/regalloc/register-allocator-unittest.cc',
'../test/unittests/compiler/schedule-unittest.cc',
'../test/unittests/compiler/scheduler-unittest.cc',
'../test/unittests/compiler/scheduler-rpo-unittest.cc',
'../test/unittests/compiler/simplified-lowering-unittest.cc',
'../test/unittests/compiler/simplified-operator-reducer-unittest.cc',
'../test/unittests/compiler/simplified-operator-unittest.cc',
'../test/unittests/compiler/state-values-utils-unittest.cc',
'../test/unittests/compiler/typed-optimization-unittest.cc',
'../test/unittests/compiler/typer-unittest.cc',
'../test/unittests/compiler/value-numbering-reducer-unittest.cc',
'../test/unittests/compiler/zone-stats-unittest.cc',
'../test/unittests/compiler-dispatcher/compiler-dispatcher-tracer-unittest.cc',
'../test/unittests/compiler-dispatcher/compiler-dispatcher-unittest.cc',
'../test/unittests/compiler-dispatcher/optimizing-compile-dispatcher-unittest.cc',
'../test/unittests/compiler-dispatcher/unoptimized-compile-job-unittest.cc',
'../test/unittests/counters-unittest.cc',
'../test/unittests/detachable-vector-unittest.cc',
'../test/unittests/eh-frame-iterator-unittest.cc',
'../test/unittests/eh-frame-writer-unittest.cc',
'../test/unittests/heap/barrier-unittest.cc',
'../test/unittests/heap/bitmap-unittest.cc',
'../test/unittests/heap/embedder-tracing-unittest.cc',
'../test/unittests/heap/gc-idle-time-handler-unittest.cc',
'../test/unittests/heap/gc-tracer-unittest.cc',
'../test/unittests/heap/item-parallel-job-unittest.cc',
'../test/unittests/heap/marking-unittest.cc',
'../test/unittests/heap/memory-reducer-unittest.cc',
'../test/unittests/heap/object-stats-unittest.cc',
'../test/unittests/heap/heap-unittest.cc',
'../test/unittests/heap/scavenge-job-unittest.cc',
'../test/unittests/heap/slot-set-unittest.cc',
'../test/unittests/heap/spaces-unittest.cc',
'../test/unittests/heap/unmapper-unittest.cc',
'../test/unittests/heap/worklist-unittest.cc',
'../test/unittests/interpreter/bytecodes-unittest.cc',
'../test/unittests/interpreter/bytecode-array-builder-unittest.cc',
'../test/unittests/interpreter/bytecode-array-iterator-unittest.cc',
'../test/unittests/interpreter/bytecode-array-random-iterator-unittest.cc',
'../test/unittests/interpreter/bytecode-array-writer-unittest.cc',
'../test/unittests/interpreter/bytecode-decoder-unittest.cc',
'../test/unittests/interpreter/bytecode-node-unittest.cc',
'../test/unittests/interpreter/bytecode-operands-unittest.cc',
'../test/unittests/interpreter/bytecode-register-allocator-unittest.cc',
'../test/unittests/interpreter/bytecode-register-optimizer-unittest.cc',
'../test/unittests/interpreter/bytecode-source-info-unittest.cc',
'../test/unittests/interpreter/bytecode-utils.h',
'../test/unittests/interpreter/constant-array-builder-unittest.cc',
'../test/unittests/interpreter/interpreter-assembler-unittest.cc',
'../test/unittests/interpreter/interpreter-assembler-unittest.h',
'../test/unittests/libplatform/default-platform-unittest.cc',
'../test/unittests/libplatform/task-queue-unittest.cc',
'../test/unittests/libplatform/worker-thread-unittest.cc',
'../test/unittests/locked-queue-unittest.cc',
'../test/unittests/object-unittest.cc',
'../test/unittests/parser/ast-value-unittest.cc',
'../test/unittests/parser/preparser-unittest.cc',
'../test/unittests/register-configuration-unittest.cc',
'../test/unittests/run-all-unittests.cc',
'../test/unittests/source-position-table-unittest.cc',
'../test/unittests/test-helpers.cc',
'../test/unittests/test-helpers.h',
'../test/unittests/test-utils.h',
'../test/unittests/test-utils.cc',
'../test/unittests/unicode-unittest.cc',
'../test/unittests/utils-unittest.cc',
'../test/unittests/value-serializer-unittest.cc',
'../test/unittests/zone/segmentpool-unittest.cc',
'../test/unittests/zone/zone-allocator-unittest.cc',
'../test/unittests/zone/zone-chunk-list-unittest.cc',
'../test/unittests/zone/zone-unittest.cc',
'../test/unittests/wasm/control-transfer-unittest.cc',
'../test/unittests/wasm/decoder-unittest.cc',
'../test/unittests/wasm/function-body-decoder-unittest.cc',
'../test/unittests/wasm/wasm-code-manager-unittest.cc',
'../test/unittests/wasm/leb-helper-unittest.cc',
'../test/unittests/wasm/loop-assignment-analysis-unittest.cc',
'../test/unittests/wasm/module-decoder-unittest.cc',
'../test/unittests/wasm/streaming-decoder-unittest.cc',
'../test/unittests/wasm/trap-handler-unittest.cc',
'../test/unittests/wasm/wasm-macro-gen-unittest.cc',
'../test/unittests/wasm/wasm-module-builder-unittest.cc',
'../test/unittests/wasm/wasm-opcodes-unittest.cc',
],
'unittests_sources_arm': [
'../test/unittests/compiler/arm/instruction-selector-arm-unittest.cc',
],
'unittests_sources_arm64': [
'../test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc',
],
'unittests_sources_ia32': [
'../test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc',
],
'unittests_sources_mips': [
'../test/unittests/compiler/mips/instruction-selector-mips-unittest.cc',
],
'unittests_sources_mips64': [
'../test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc',
],
'unittests_sources_x64': [
'../test/unittests/compiler/x64/instruction-selector-x64-unittest.cc',
],
'unittests_sources_ppc': [
'../test/unittests/compiler/ppc/instruction-selector-ppc-unittest.cc',
],
'unittests_sources_s390': [
'../test/unittests/compiler/s390/instruction-selector-s390-unittest.cc',
],
},
'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'unittests',
'type': 'executable',
'variables': {
'optimize': 'max',
},
'dependencies': [
'gmock.gyp:gmock',
'gtest.gyp:gtest',
'v8.gyp:v8',
'v8.gyp:v8_libbase',
'v8.gyp:v8_libplatform',
'v8.gyp:v8_maybe_snapshot',
],
'include_dirs': [
'..',
],
'sources': [
'<@(unittests_sources)',
],
'conditions': [
['v8_target_arch=="arm"', {
'sources': [
'<@(unittests_sources_arm)',
],
}],
['v8_target_arch=="arm64"', {
'sources': [
'<@(unittests_sources_arm64)',
],
}],
['v8_target_arch=="ia32"', {
'sources': [
'<@(unittests_sources_ia32)',
],
}],
['v8_target_arch=="mips"', {
'sources': [
'<@(unittests_sources_mips)',
],
}],
['v8_target_arch=="mipsel"', {
'sources': [
'<@(unittests_sources_mips)',
],
}],
['v8_target_arch=="mips64"', {
'sources': [
'<@(unittests_sources_mips64)',
],
}],
['v8_target_arch=="mips64el"', {
'sources': [
'<@(unittests_sources_mips64)',
],
}],
['v8_target_arch=="x64"', {
'sources': [
'<@(unittests_sources_x64)',
],
}],
['v8_target_arch=="ppc" or v8_target_arch=="ppc64"', {
'sources': [
'<@(unittests_sources_ppc)',
],
}],
['v8_target_arch=="s390" or v8_target_arch=="s390x"', {
'sources': [
'<@(unittests_sources_s390)',
],
}],
['OS=="aix"', {
'ldflags': [ '-Wl,-bbigtoc' ],
}],
['v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
],
}],
['v8_use_snapshot=="true"', {
'dependencies': ['v8.gyp:v8_initializers'],
}],
],
},
],
}

2613 deps/v8/gypfiles/v8.gyp vendored Normal file

File diff suppressed because it is too large

36 deps/v8/gypfiles/v8vtune.gyp vendored Normal file

@ -0,0 +1,36 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'v8_code': 1,
},
'includes': ['toolchain.gypi', 'features.gypi'],
'targets': [
{
'target_name': 'v8_vtune',
'type': 'static_library',
'dependencies': [
'v8.gyp:v8',
],
'sources': [
'../src/third_party/vtune/ittnotify_config.h',
'../src/third_party/vtune/ittnotify_types.h',
'../src/third_party/vtune/jitprofiling.cc',
'../src/third_party/vtune/jitprofiling.h',
'../src/third_party/vtune/v8-vtune.h',
'../src/third_party/vtune/vtune-jit.cc',
'../src/third_party/vtune/vtune-jit.h',
],
'direct_dependent_settings': {
'defines': ['ENABLE_VTUNE_JIT_INTERFACE',],
'conditions': [
['OS != "win"', {
'libraries': ['-ldl',],
}],
],
},
},
],
}


@ -22,19 +22,19 @@ import sys
V8_BASE = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
GYP_FILES = [
os.path.join(V8_BASE, 'src', 'd8.gyp'),
os.path.join(V8_BASE, 'src', 'v8.gyp'),
os.path.join(V8_BASE, 'src', 'inspector', 'inspector.gypi'),
os.path.join(V8_BASE, 'src', 'third_party', 'vtune', 'v8vtune.gyp'),
os.path.join(V8_BASE, 'samples', 'samples.gyp'),
os.path.join(V8_BASE, 'test', 'cctest', 'cctest.gyp'),
os.path.join(V8_BASE, 'test', 'fuzzer', 'fuzzer.gyp'),
os.path.join(V8_BASE, 'test', 'unittests', 'unittests.gyp'),
os.path.join(V8_BASE, 'test', 'inspector', 'inspector.gyp'),
os.path.join(V8_BASE, 'test', 'mkgrokdump', 'mkgrokdump.gyp'),
os.path.join(V8_BASE, 'testing', 'gmock.gyp'),
os.path.join(V8_BASE, 'testing', 'gtest.gyp'),
os.path.join(V8_BASE, 'tools', 'parser-shell.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'd8.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'v8.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'inspector.gypi'),
os.path.join(V8_BASE, 'gypfiles', 'v8vtune.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'samples.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'cctest.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'fuzzer.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'unittests.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'inspector-test.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'mkgrokdump.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'gmock.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'gtest.gyp'),
os.path.join(V8_BASE, 'gypfiles', 'parser-shell.gyp'),
]
ALL_GYP_PREFIXES = [


@ -24,6 +24,6 @@ def PostUploadHook(cl, change, output_api):
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
'master.tryserver.chromium.linux:linux_chromium_rel_ng'
'luci.chromium.try:linux_chromium_rel_ng'
],
'Automatically added layout test trybots to run tests on CQ.')


@ -1,255 +0,0 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_V8_DEBUG_H_
#define V8_V8_DEBUG_H_
#include "v8.h" // NOLINT(build/include)
/**
* ATTENTION: The debugger API exposed by this file is deprecated and will be
* removed by the end of 2017. Please use the V8 inspector declared
* in include/v8-inspector.h instead.
*/
namespace v8 {
// Debug events which can occur in the V8 JavaScript engine.
enum DebugEvent {
Break = 1,
Exception = 2,
AfterCompile = 3,
CompileError = 4,
AsyncTaskEvent = 5,
};
class V8_EXPORT Debug {
public:
/**
* A client object passed to the v8 debugger, which takes ownership of it.
* v8 is always responsible for deleting the object.
*/
class ClientData {
public:
virtual ~ClientData() {}
};
/**
* A message object passed to the debug message handler.
*/
class Message {
public:
/**
* Check type of message.
*/
virtual bool IsEvent() const = 0;
virtual bool IsResponse() const = 0;
virtual DebugEvent GetEvent() const = 0;
/**
* Indicate whether this is a response to a continue command which will
* start the VM running after this is processed.
*/
virtual bool WillStartRunning() const = 0;
/**
* Access to execution state and event data. Don't store these across
* callbacks as their content becomes invalid. These objects are from the
* debugger event that started the debug message loop.
*/
virtual Local<Object> GetExecutionState() const = 0;
virtual Local<Object> GetEventData() const = 0;
/**
* Get the debugger protocol JSON.
*/
virtual Local<String> GetJSON() const = 0;
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
* running in its own context which is entered at this point.
*/
virtual Local<Context> GetEventContext() const = 0;
/**
* Client data passed with the corresponding request, if any. This is the
* client_data value passed into Debug::SendCommand along with the
* request that led to the message, or NULL if the message is an event. The
* debugger takes ownership of the data and will delete it even if there is
* no message handler.
*/
virtual ClientData* GetClientData() const = 0;
virtual Isolate* GetIsolate() const = 0;
virtual ~Message() {}
};
/**
* An event details object passed to the debug event listener.
*/
class EventDetails {
public:
/**
* Event type.
*/
virtual DebugEvent GetEvent() const = 0;
/**
* Access to execution state and event data of the debug event. Don't store
* these across callbacks as their content becomes invalid.
*/
virtual Local<Object> GetExecutionState() const = 0;
virtual Local<Object> GetEventData() const = 0;
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
* running in its own context which is entered at this point.
*/
virtual Local<Context> GetEventContext() const = 0;
/**
* Client data passed with the corresponding callback when it was
* registered.
*/
virtual Local<Value> GetCallbackData() const = 0;
/**
* This is now a dummy that returns nullptr.
*/
virtual ClientData* GetClientData() const = 0;
virtual Isolate* GetIsolate() const = 0;
virtual ~EventDetails() {}
};
/**
* Debug event callback function.
*
* \param event_details object providing information about the debug event
*
* An EventCallback does not take possession of the event data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*EventCallback)(const EventDetails& event_details);
/**
* This is now a no-op.
*/
typedef void (*MessageHandler)(const Message& message);
V8_DEPRECATED("No longer supported", static bool SetDebugEventListener(
Isolate* isolate, EventCallback that,
Local<Value> data = Local<Value>()));
// Schedule a debugger break to happen when JavaScript code is run
// in the given isolate.
V8_DEPRECATED("No longer supported",
static void DebugBreak(Isolate* isolate));
// Remove scheduled debugger break in given isolate if it has not
// happened yet.
V8_DEPRECATED("No longer supported",
static void CancelDebugBreak(Isolate* isolate));
// Check if a debugger break is scheduled in the given isolate.
V8_DEPRECATED("No longer supported",
static bool CheckDebugBreak(Isolate* isolate));
// This is now a no-op.
V8_DEPRECATED("No longer supported",
static void SetMessageHandler(Isolate* isolate,
MessageHandler handler));
// This is now a no-op.
V8_DEPRECATED("No longer supported",
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
ClientData* client_data = NULL));
/**
* Run a JavaScript function in the debugger.
* \param fun the function to call
* \param data passed as second argument to the function
* With this call the debugger is entered and the function specified is called
* with the execution state as the first argument. This makes it possible to
* get access to information otherwise not available during normal JavaScript
* execution, e.g. details on stack frames. The receiver of the function call
* will be the debugger context global object; however, this is subject to
* change.
* The following example shows a JavaScript function which when passed to
* v8::Debug::Call will return the current line of JavaScript execution.
*
* \code
* function frame_source_line(exec_state) {
* return exec_state.frame(0).sourceLine();
* }
* \endcode
*/
V8_DEPRECATED("No longer supported",
static MaybeLocal<Value> Call(
Local<Context> context, v8::Local<v8::Function> fun,
Local<Value> data = Local<Value>()));
// This is now a no-op.
V8_DEPRECATED("No longer supported",
static void ProcessDebugMessages(Isolate* isolate));
/**
* Debugger is running in its own context which is entered while debugger
* messages are being dispatched. This is an explicit getter for this
* debugger context. Note that the content of the debugger context is subject
* to change. The Context exists only when the debugger is active, i.e. at
* least one DebugEventListener or MessageHandler is set.
*/
V8_DEPRECATED("Use v8-inspector",
static Local<Context> GetDebugContext(Isolate* isolate));
/**
* While in the debug context, this method returns the top-most non-debug
* context, if it exists.
*/
V8_DEPRECATED(
"No longer supported",
static MaybeLocal<Context> GetDebuggedContext(Isolate* isolate));
/**
* Enable/disable LiveEdit functionality for the given Isolate
* (default Isolate if not provided). V8 will abort if LiveEdit is
* unexpectedly used. LiveEdit is enabled by default.
*/
V8_DEPRECATED("No longer supported",
static void SetLiveEditEnabled(Isolate* isolate, bool enable));
/**
* Returns an array of internal properties specific to the value type. The
* result has the following format: [<name>, <value>, ..., <name>, <value>].
* The result array will be allocated in the current context.
*/
V8_DEPRECATED("No longer supported",
static MaybeLocal<Array> GetInternalProperties(
Isolate* isolate, Local<Value> value));
/**
* Defines if the ES2015 tail call elimination feature is enabled or not.
* The change of this flag triggers deoptimization of all functions that
* contain calls at tail position.
*/
V8_DEPRECATED("No longer supported",
static bool IsTailCallEliminationEnabled(Isolate* isolate));
V8_DEPRECATED("No longer supported",
static void SetTailCallEliminationEnabled(Isolate* isolate,
bool enabled));
};
} // namespace v8
#undef EXPORT
#endif // V8_V8_DEBUG_H_


@ -626,6 +626,68 @@ class V8_EXPORT AllocationProfile {
static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
/**
* An object graph consisting of embedder objects and V8 objects.
* Edges of the graph are strong references between the objects.
* The embedder can build this graph during heap snapshot generation
* to include the embedder objects in the heap snapshot.
* Usage:
* 1) Define a class derived from EmbedderGraph::Node for embedder objects.
* 2) Set the build embedder graph callback on the heap profiler using
* HeapProfiler::SetBuildEmbedderGraphCallback.
* 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
* node1 to node2.
* 4) To represent references from/to a V8 object, construct V8 nodes using
* graph->V8Node(value).
*/
class V8_EXPORT EmbedderGraph {
public:
class Node {
public:
Node() = default;
virtual ~Node() = default;
virtual const char* Name() = 0;
virtual size_t SizeInBytes() = 0;
/**
* The corresponding V8 wrapper node if not null.
* During heap snapshot generation the embedder node and the V8 wrapper
* node will be merged into one node to simplify retaining paths.
*/
virtual Node* WrapperNode() { return nullptr; }
virtual bool IsRootNode() { return false; }
/** Must return true for non-V8 nodes. */
virtual bool IsEmbedderNode() { return true; }
/**
* Optional name prefix. It is used in Chrome for tagging detached nodes.
*/
virtual const char* NamePrefix() { return nullptr; }
private:
Node(const Node&) = delete;
Node& operator=(const Node&) = delete;
};
/**
* Returns a node corresponding to the given V8 value. Ownership is not
* transferred. The result pointer is valid while the graph is alive.
*/
virtual Node* V8Node(const v8::Local<v8::Value>& value) = 0;
/**
* Adds the given node to the graph and takes ownership of the node.
* Returns a raw pointer to the node that is valid while the graph is alive.
*/
virtual Node* AddNode(std::unique_ptr<Node> node) = 0;
/**
* Adds an edge that represents a strong reference from the given node
* |from| to the given node |to|. The nodes must be added to the graph
* before calling this function.
*/
virtual void AddEdge(Node* from, Node* to) = 0;
virtual ~EmbedderGraph() = default;
};
/**
* Interface for controlling heap profiling. Instance of the
@ -665,6 +727,15 @@ class V8_EXPORT HeapProfiler {
typedef RetainedObjectInfo* (*WrapperInfoCallback)(uint16_t class_id,
Local<Value> wrapper);
/**
* Callback function invoked during heap snapshot generation to retrieve
* the embedder object graph. The callback should use graph->AddEdge(..) to
* add references between the objects.
* The callback must not trigger garbage collection in V8.
*/
typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
v8::EmbedderGraph* graph);
/** Returns the number of snapshots taken. */
int GetSnapshotCount();
@ -809,6 +880,7 @@ class V8_EXPORT HeapProfiler {
WrapperInfoCallback callback);
void SetGetRetainerInfosCallback(GetRetainerInfosCallback callback);
void SetBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback);
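For context, a minimal sketch of the intended embedder wiring; EmbedderNode, the "NativeCache" label, and the 64-byte size are illustrative assumptions, not part of this change:

#include <memory>

// Hypothetical embedder-side node type.
class EmbedderNode final : public v8::EmbedderGraph::Node {
 public:
  const char* Name() override { return "NativeCache"; }  // illustrative
  size_t SizeInBytes() override { return 64; }           // illustrative
};

// Matches the BuildEmbedderGraphCallback typedef above.
void BuildGraph(v8::Isolate* isolate, v8::EmbedderGraph* graph) {
  // AddNode takes ownership; the raw pointer stays valid while the graph
  // is alive and can be used to add edges.
  v8::EmbedderGraph::Node* native = graph->AddNode(
      std::unique_ptr<v8::EmbedderGraph::Node>(new EmbedderNode()));
  // For a Local<Value> `wrapper` retained by the native object:
  //   graph->AddEdge(native, graph->V8Node(wrapper));
  (void)isolate;
  (void)native;
}

// Registered once per isolate:
//   isolate->GetHeapProfiler()->SetBuildEmbedderGraphCallback(BuildGraph);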
/**
* Default value of persistent handle class ID. Must not be used to


@ -196,16 +196,6 @@ class PersistentValueMapBase {
return SetReturnValueFromVal(&returnValue, Traits::Get(&impl_, key));
}
/**
* Call Isolate::SetReference with the given parent and the map value.
*/
void SetReference(const K& key,
const Persistent<Object>& parent) {
GetIsolate()->SetReference(
reinterpret_cast<internal::Object**>(parent.val_),
reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))));
}
/**
* Call V8::RegisterExternallyReferencedObject with the map value for given
* key.


@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 6
#define V8_MINOR_VERSION 5
#define V8_BUILD_NUMBER 254
#define V8_PATCH_LEVEL 43
#define V8_MINOR_VERSION 6
#define V8_BUILD_NUMBER 346
#define V8_PATCH_LEVEL 23
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

257 deps/v8/include/v8.h vendored

@ -145,7 +145,7 @@ class Heap;
class HeapObject;
class Isolate;
class Object;
struct StreamedSource;
struct ScriptStreamingData;
template<typename T> class CustomArguments;
class PropertyCallbackArguments;
class FunctionCallbackArguments;
@ -553,6 +553,14 @@ template <class T> class PersistentBase {
// TODO(dcarney): remove this.
V8_INLINE void ClearWeak() { ClearWeak<void>(); }
/**
* Annotates the strong handle with the given label, which is then used by the
* heap snapshot generator as the name of the edge from the root to the handle.
* The function does not take ownership of the label and assumes that the
* label is valid as long as the handle is valid.
*/
V8_INLINE void AnnotateStrongRetainer(const char* label);
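A minimal usage sketch, assuming an `isolate` and a Local<Object> `object` in scope; note the label is not copied, so a string literal (static storage duration) is the safe choice:

v8::Persistent<v8::Object> cache(isolate, object);
// Heap snapshots will name the root edge to this handle after the label.
cache.AnnotateStrongRetainer("MyEmbedder::kObjectCache");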
/**
* Allows the embedder to tell the v8 garbage collector that a certain object
* is alive. Only allowed when the embedder is asked to trace its heap by
@ -945,7 +953,7 @@ class V8_EXPORT EscapableHandleScope : public HandleScope {
*/
class V8_EXPORT SealHandleScope {
public:
SealHandleScope(Isolate* isolate);
explicit SealHandleScope(Isolate* isolate);
~SealHandleScope();
SealHandleScope(const SealHandleScope&) = delete;
@ -1136,6 +1144,7 @@ class V8_EXPORT Module {
public:
/**
* The different states a module can be in.
*
* This corresponds to the states used in ECMAScript except that "evaluated"
* is split into kEvaluated and kErrored, indicating success and failure,
* respectively.
@ -1186,7 +1195,7 @@ class V8_EXPORT Module {
Local<Module> referrer);
/**
* ModuleDeclarationInstantiation
* Instantiates the module and its dependencies.
*
* Returns an empty Maybe<bool> if an exception occurred during
* instantiation. (In the case where the callback throws an exception, that
@ -1196,16 +1205,19 @@ class V8_EXPORT Module {
ResolveCallback callback);
/**
* ModuleEvaluation
* Evaluates the module and its dependencies.
*
* Returns the completion value.
* TODO(neis): Be more precise or say nothing.
* If status is kInstantiated, run the module's code. On success, set status
* to kEvaluated and return the completion value; on failure, set status to
* kErrored and propagate the thrown exception (which is then also available
* via |GetException|).
*/
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Evaluate(Local<Context> context);
/**
* Returns the namespace object of this module.
* The module's status must be kEvaluated.
*
* The module's status must be at least kInstantiated.
*/
Local<Value> GetModuleNamespace();
};
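For context, a sketch of the kUninstantiated -> kInstantiated -> kEvaluated flow; `isolate`, `context`, and a module `source` (a ScriptCompiler::Source whose ScriptOrigin was built with is_module = true) are assumptions, and the resolver here simply rejects every import:

v8::MaybeLocal<v8::Module> FailResolve(v8::Local<v8::Context>,
                                       v8::Local<v8::String>,
                                       v8::Local<v8::Module>) {
  return v8::MaybeLocal<v8::Module>();  // This sketch supports no imports.
}

v8::Local<v8::Module> module =
    v8::ScriptCompiler::CompileModule(isolate, &source).ToLocalChecked();
if (module->InstantiateModule(context, FailResolve).FromMaybe(false)) {
  // Evaluate moves kInstantiated to kEvaluated, or kErrored on throw.
  v8::MaybeLocal<v8::Value> completion = module->Evaluate(context);
  v8::Local<v8::Value> ns = module->GetModuleNamespace();
  (void)completion;
  (void)ns;
}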
@ -1219,24 +1231,23 @@ class V8_EXPORT Script {
/**
* A shorthand for ScriptCompiler::Compile().
*/
static V8_DEPRECATE_SOON(
"Use maybe version",
Local<Script> Compile(Local<String> source,
ScriptOrigin* origin = nullptr));
static V8_DEPRECATED("Use maybe version",
Local<Script> Compile(Local<String> source,
ScriptOrigin* origin = nullptr));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
Local<Context> context, Local<String> source,
ScriptOrigin* origin = nullptr);
static Local<Script> V8_DEPRECATE_SOON("Use maybe version",
Compile(Local<String> source,
Local<String> file_name));
static Local<Script> V8_DEPRECATED("Use maybe version",
Compile(Local<String> source,
Local<String> file_name));
/**
* Runs the script returning the resulting value. It will be run in the
* context in which it was created (ScriptCompiler::CompileBound or
* UnboundScript::BindToCurrentContext()).
*/
V8_DEPRECATE_SOON("Use maybe version", Local<Value> Run());
V8_DEPRECATED("Use maybe version", Local<Value> Run());
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Run(Local<Context> context);
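The migration to the maybe versions is mechanical; a sketch, assuming `context` and `source_string` in scope:

// Before: Local<Script> s = Script::Compile(source_string); s->Run();
v8::Local<v8::Script> script;
v8::Local<v8::Value> result;
if (v8::Script::Compile(context, source_string).ToLocal(&script) &&
    script->Run(context).ToLocal(&result)) {
  // Success; an empty handle at either step signals a pending exception.
}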
/**
@ -1395,14 +1406,14 @@ class V8_EXPORT ScriptCompiler {
// object is alive.
const CachedData* GetCachedData() const;
internal::StreamedSource* impl() const { return impl_; }
internal::ScriptStreamingData* impl() const { return impl_; }
// Prevent copying.
StreamedSource(const StreamedSource&) = delete;
StreamedSource& operator=(const StreamedSource&) = delete;
private:
internal::StreamedSource* impl_;
internal::ScriptStreamingData* impl_;
};
/**
@ -1546,13 +1557,13 @@ class V8_EXPORT ScriptCompiler {
* It is possible to specify multiple context extensions (obj in the above
* example).
*/
static V8_DEPRECATE_SOON("Use maybe version",
Local<Function> CompileFunctionInContext(
Isolate* isolate, Source* source,
Local<Context> context, size_t arguments_count,
Local<String> arguments[],
size_t context_extension_count,
Local<Object> context_extensions[]));
static V8_DEPRECATED("Use maybe version",
Local<Function> CompileFunctionInContext(
Isolate* isolate, Source* source,
Local<Context> context, size_t arguments_count,
Local<String> arguments[],
size_t context_extension_count,
Local<Object> context_extensions[]));
static V8_WARN_UNUSED_RESULT MaybeLocal<Function> CompileFunctionInContext(
Local<Context> context, Source* source, size_t arguments_count,
Local<String> arguments[], size_t context_extension_count,
@ -1580,7 +1591,7 @@ class V8_EXPORT Message {
public:
Local<String> Get() const;
V8_DEPRECATE_SOON("Use maybe version", Local<String> GetSourceLine() const);
V8_DEPRECATED("Use maybe version", Local<String> GetSourceLine() const);
V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSourceLine(
Local<Context> context) const;
@ -1606,7 +1617,7 @@ class V8_EXPORT Message {
/**
* Returns the number, 1-based, of the line where the error occurred.
*/
V8_DEPRECATE_SOON("Use maybe version", int GetLineNumber() const);
V8_DEPRECATED("Use maybe version", int GetLineNumber() const);
V8_WARN_UNUSED_RESULT Maybe<int> GetLineNumber(Local<Context> context) const;
/**
@ -1630,7 +1641,7 @@ class V8_EXPORT Message {
* Returns the index within the line of the first character where
* the error occurred.
*/
V8_DEPRECATE_SOON("Use maybe version", int GetStartColumn() const);
V8_DEPRECATED("Use maybe version", int GetStartColumn() const);
V8_WARN_UNUSED_RESULT Maybe<int> GetStartColumn(Local<Context> context) const;
/**
@ -1864,7 +1875,7 @@ class V8_EXPORT ValueSerializer {
* SharedArrayBuffer object. The embedder must return an ID for the
* object, using the same ID if this SharedArrayBuffer has already been
* serialized in this buffer. When deserializing, this ID will be passed to
* ValueDeserializer::TransferSharedArrayBuffer as |transfer_id|.
* ValueDeserializer::GetSharedArrayBufferFromId as |clone_id|.
*
* If the object cannot be serialized, an
* exception should be thrown and Nothing<uint32_t>() returned.
@ -1991,6 +2002,13 @@ class V8_EXPORT ValueDeserializer {
*/
virtual MaybeLocal<WasmCompiledModule> GetWasmModuleFromId(
Isolate* isolate, uint32_t transfer_id);
/**
* Get a SharedArrayBuffer given a clone_id previously provided
* by ValueSerializer::GetSharedArrayBufferId.
*/
virtual MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId(
Isolate* isolate, uint32_t clone_id);
};
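For context, the clone_id handed back here is the one the sending side returned from ValueSerializer::Delegate::GetSharedArrayBufferId; a sketch with a hypothetical embedder-owned table:

#include <vector>

class MyDelegate : public v8::ValueDeserializer::Delegate {
 public:
  v8::MaybeLocal<v8::SharedArrayBuffer> GetSharedArrayBufferFromId(
      v8::Isolate* isolate, uint32_t clone_id) override {
    if (clone_id >= table_.size())
      return v8::MaybeLocal<v8::SharedArrayBuffer>();
    return table_[clone_id].Get(isolate);
  }
  // Filled during serialization, indexed by the ids handed out there.
  std::vector<v8::Global<v8::SharedArrayBuffer>> table_;
};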
ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size);
@ -2308,6 +2326,16 @@ class V8_EXPORT Value : public Data {
*/
bool IsFloat64Array() const;
/**
* Returns true if this value is a BigInt64Array.
*/
bool IsBigInt64Array() const;
/**
* Returns true if this value is a BigUint64Array.
*/
bool IsBigUint64Array() const;
/**
* Returns true if this value is a DataView.
*/
@ -2783,8 +2811,8 @@ class V8_EXPORT String : public Name {
*/
class V8_EXPORT Utf8Value {
public:
V8_DEPRECATE_SOON("Use Isolate version",
explicit Utf8Value(Local<v8::Value> obj));
V8_DEPRECATED("Use Isolate version",
explicit Utf8Value(Local<v8::Value> obj));
Utf8Value(Isolate* isolate, Local<v8::Value> obj);
~Utf8Value();
char* operator*() { return str_; }
@ -2808,8 +2836,7 @@ class V8_EXPORT String : public Name {
*/
class V8_EXPORT Value {
public:
V8_DEPRECATE_SOON("Use Isolate version",
explicit Value(Local<v8::Value> obj));
V8_DEPRECATED("Use Isolate version", explicit Value(Local<v8::Value> obj));
Value(Isolate* isolate, Local<v8::Value> obj);
~Value();
uint16_t* operator*() { return str_; }
@ -3058,6 +3085,12 @@ enum class KeyCollectionMode { kOwnOnly, kIncludePrototypes };
*/
enum class IndexFilter { kIncludeIndices, kSkipIndices };
/**
* kConvertToString will convert integer indices to strings.
* kKeepNumbers will return numbers for integer indices.
*/
enum class KeyConversionMode { kConvertToString, kKeepNumbers };
/**
* Integrity level for objects.
*/
@ -3192,6 +3225,19 @@ class V8_EXPORT Object : public Value {
AccessorNameSetterCallback setter = nullptr,
Local<Value> data = Local<Value>(), PropertyAttribute attributes = None);
/**
* Attempts to create a property with the given name which behaves like a data
* property, except that the provided getter is invoked (and provided with the
* data value) to supply its value the first time it is read. After the
* property is accessed once, it is replaced with an ordinary data property.
*
* Analogous to Template::SetLazyDataProperty.
*/
V8_WARN_UNUSED_RESULT Maybe<bool> SetLazyDataProperty(
Local<Context> context, Local<Name> name,
AccessorNameGetterCallback getter, Local<Value> data = Local<Value>(),
PropertyAttribute attributes = None);
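A sketch of the lazy getter; ComputeExpensiveValue, `obj`, `key`, and `context` are assumptions:

void LazyGetter(v8::Local<v8::Name> name,
                const v8::PropertyCallbackInfo<v8::Value>& info) {
  // Runs on the first read only; V8 then installs the result as a plain
  // data property, so later reads never re-enter this callback.
  info.GetReturnValue().Set(ComputeExpensiveValue(info.GetIsolate()));
}

obj->SetLazyDataProperty(context, key, LazyGetter).FromJust();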
/**
* Functionality for private properties.
* This is an experimental feature, use at your own risk.
@ -3215,7 +3261,8 @@ class V8_EXPORT Object : public Value {
Local<Context> context);
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
Local<Context> context, KeyCollectionMode mode,
PropertyFilter property_filter, IndexFilter index_filter);
PropertyFilter property_filter, IndexFilter index_filter,
KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
/**
* This function has the same functionality as GetPropertyNames but
@ -3233,7 +3280,8 @@ class V8_EXPORT Object : public Value {
* be enumerated by a for-in statement over this object.
*/
V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
Local<Context> context, PropertyFilter filter);
Local<Context> context, PropertyFilter filter,
KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
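A sketch of the new key_conversion parameter; `obj` and `context` are assumed in scope:

v8::Local<v8::Array> names;
if (obj->GetOwnPropertyNames(context, v8::ONLY_ENUMERABLE,
                             v8::KeyConversionMode::kConvertToString)
        .ToLocal(&names)) {
  // Integer indices arrive as strings here; the kKeepNumbers default
  // would return them as numbers instead.
}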
/**
* Get the prototype object. This does not skip objects marked to
@ -4046,11 +4094,15 @@ class V8_EXPORT Proxy : public Object {
class V8_EXPORT WasmCompiledModule : public Object {
public:
typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> SerializedModule;
// A buffer that is owned by the caller.
/**
* A buffer that is owned by the caller.
*/
typedef std::pair<const uint8_t*, size_t> CallerOwnedBuffer;
// An opaque, native heap object for transferring wasm modules. It
// supports move semantics, and does not support copy semantics.
/**
* An opaque, native heap object for transferring wasm modules. It
* supports move semantics, and does not support copy semantics.
*/
class TransferrableModule final {
public:
TransferrableModule(TransferrableModule&& src) = default;
@ -4069,35 +4121,41 @@ class V8_EXPORT WasmCompiledModule : public Object {
OwnedBuffer wire_bytes = {nullptr, 0};
};
// Get an in-memory, non-persistable, and context-independent (meaning,
// suitable for transfer to another Isolate and Context) representation
// of this wasm compiled module.
/**
* Get an in-memory, non-persistable, and context-independent (meaning,
* suitable for transfer to another Isolate and Context) representation
* of this wasm compiled module.
*/
TransferrableModule GetTransferrableModule();
// Efficiently re-create a WasmCompiledModule, without recompiling, from
// a TransferrableModule.
/**
* Efficiently re-create a WasmCompiledModule, without recompiling, from
* a TransferrableModule.
*/
static MaybeLocal<WasmCompiledModule> FromTransferrableModule(
Isolate* isolate, const TransferrableModule&);
// Get the wasm-encoded bytes that were used to compile this module.
/**
* Get the wasm-encoded bytes that were used to compile this module.
*/
Local<String> GetWasmWireBytes();
// Serialize the compiled module. The serialized data does not include the
// uncompiled bytes.
/**
* Serialize the compiled module. The serialized data does not include the
* uncompiled bytes.
*/
SerializedModule Serialize();
// If possible, deserialize the module, otherwise compile it from the provided
// uncompiled bytes.
/**
* If possible, deserialize the module, otherwise compile it from the provided
* uncompiled bytes.
*/
static MaybeLocal<WasmCompiledModule> DeserializeOrCompile(
Isolate* isolate, const CallerOwnedBuffer& serialized_module,
const CallerOwnedBuffer& wire_bytes);
V8_INLINE static WasmCompiledModule* Cast(Value* obj);
private:
// TODO(ahaas): please remove the friend once streamed compilation is
// implemented
friend class WasmModuleObjectBuilder;
static MaybeLocal<WasmCompiledModule> Deserialize(
Isolate* isolate, const CallerOwnedBuffer& serialized_module,
const CallerOwnedBuffer& wire_bytes);
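For context, a sketch of the cross-isolate flow; `module` (a Local<WasmCompiledModule> in the source isolate) and `isolate_b` are assumptions:

// TransferrableModule is move-only; GetTransferrableModule() makes an
// isolate-independent snapshot of the compiled module.
v8::WasmCompiledModule::TransferrableModule transferrable =
    module->GetTransferrableModule();
// ... move `transferrable` to code running on the other isolate ...
v8::MaybeLocal<v8::WasmCompiledModule> copy =
    v8::WasmCompiledModule::FromTransferrableModule(isolate_b, transferrable);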
@ -4117,11 +4175,18 @@ class V8_EXPORT WasmCompiledModule : public Object {
// to simply WasmModuleObjectBuilder
class V8_EXPORT WasmModuleObjectBuilderStreaming final {
public:
WasmModuleObjectBuilderStreaming(Isolate* isolate);
// The buffer passed into OnBytesReceived is owned by the caller.
explicit WasmModuleObjectBuilderStreaming(Isolate* isolate);
/**
* The buffer passed into OnBytesReceived is owned by the caller.
*/
void OnBytesReceived(const uint8_t*, size_t size);
void Finish();
void Abort(Local<Value> exception);
/**
* Abort streaming compilation. If {exception} has a value, then the promise
* associated with streaming compilation is rejected with that value. If
* {exception} does not have a value, the promise does not get rejected.
*/
void Abort(MaybeLocal<Value> exception);
Local<Promise> GetPromise();
~WasmModuleObjectBuilderStreaming();
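A usage sketch; `isolate` and a caller-owned byte `chunk` (e.g. a std::vector<uint8_t>) are assumptions:

v8::WasmModuleObjectBuilderStreaming builder(isolate);
v8::Local<v8::Promise> promise = builder.GetPromise();
builder.OnBytesReceived(chunk.data(), chunk.size());
builder.Finish();  // Resolves `promise` once compilation succeeds.
// On error instead: builder.Abort(v8::MaybeLocal<v8::Value>());
// an empty MaybeLocal aborts without rejecting `promise`.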
@ -4140,11 +4205,13 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final {
Isolate* isolate_ = nullptr;
#if V8_CC_MSVC
// We don't need the static Copy API, so the default
// NonCopyablePersistentTraits would be sufficient, however,
// MSVC eagerly instantiates the Copy.
// We ensure we don't use Copy, however, by compiling with the
// defaults everywhere else.
/**
* We don't need the static Copy API, so the default
* NonCopyablePersistentTraits would be sufficient, however,
* MSVC eagerly instantiates the Copy.
* We ensure we don't use Copy, however, by compiling with the
* defaults everywhere else.
*/
Persistent<Promise, CopyablePersistentTraits<Promise>> promise_;
#else
Persistent<Promise> promise_;
@ -4154,30 +4221,6 @@ class V8_EXPORT WasmModuleObjectBuilderStreaming final {
std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
};
class V8_EXPORT WasmModuleObjectBuilder final {
public:
WasmModuleObjectBuilder(Isolate* isolate) : isolate_(isolate) {}
// The buffer passed into OnBytesReceived is owned by the caller.
void OnBytesReceived(const uint8_t*, size_t size);
MaybeLocal<WasmCompiledModule> Finish();
private:
Isolate* isolate_ = nullptr;
// TODO(ahaas): We probably need none of this below here once streamed
// compilation is implemented.
typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> Buffer;
// Disable copy semantics *in this implementation*. We can choose to
// relax this, albeit it's not clear why.
WasmModuleObjectBuilder(const WasmModuleObjectBuilder&) = delete;
WasmModuleObjectBuilder(WasmModuleObjectBuilder&&) = default;
WasmModuleObjectBuilder& operator=(const WasmModuleObjectBuilder&) = delete;
WasmModuleObjectBuilder& operator=(WasmModuleObjectBuilder&&) = default;
std::vector<Buffer> received_buffers_;
size_t total_size_ = 0;
};
#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
// The number of required internal fields can be defined by embedder.
#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
@ -4617,6 +4660,37 @@ class V8_EXPORT Float64Array : public TypedArray {
static void CheckCast(Value* obj);
};
/**
* An instance of BigInt64Array constructor.
*/
class V8_EXPORT BigInt64Array : public TypedArray {
public:
static Local<BigInt64Array> New(Local<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
static Local<BigInt64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
size_t byte_offset, size_t length);
V8_INLINE static BigInt64Array* Cast(Value* obj);
private:
BigInt64Array();
static void CheckCast(Value* obj);
};
/**
* An instance of BigUint64Array constructor.
*/
class V8_EXPORT BigUint64Array : public TypedArray {
public:
static Local<BigUint64Array> New(Local<ArrayBuffer> array_buffer,
size_t byte_offset, size_t length);
static Local<BigUint64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
size_t byte_offset, size_t length);
V8_INLINE static BigUint64Array* Cast(Value* obj);
private:
BigUint64Array();
static void CheckCast(Value* obj);
};
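A minimal construction sketch, assuming an `isolate` in scope:

// Eight 64-bit elements over a fresh 64-byte ArrayBuffer.
v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 8 * 8);
v8::Local<v8::BigInt64Array> array =
    v8::BigInt64Array::New(buffer, /*byte_offset=*/0, /*length=*/8);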
/**
* An instance of DataView constructor (ES6 draft 15.13.7).
@ -4878,9 +4952,8 @@ class V8_EXPORT RegExp : public Object {
* static_cast<RegExp::Flags>(kGlobal | kMultiline))
* is equivalent to evaluating "/foo/gm".
*/
static V8_DEPRECATE_SOON("Use maybe version",
Local<RegExp> New(Local<String> pattern,
Flags flags));
static V8_DEPRECATED("Use maybe version",
Local<RegExp> New(Local<String> pattern, Flags flags));
static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> New(Local<Context> context,
Local<String> pattern,
Flags flags);
@ -7437,7 +7510,7 @@ class V8_EXPORT Isolate {
/**
* Enqueues the callback to the Microtask Work Queue
*/
void EnqueueMicrotask(MicrotaskCallback microtask, void* data = NULL);
void EnqueueMicrotask(MicrotaskCallback callback, void* data = nullptr);
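A sketch; the State struct is a hypothetical stand-in for embedder data, which must outlive the queued microtask:

struct State { int hits = 0; };
void OnMicrotask(void* data) {  // matches MicrotaskCallback
  static_cast<State*>(data)->hits++;
}
// isolate->EnqueueMicrotask(OnMicrotask, &state);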
/**
* Controls how Microtasks are invoked. See MicrotasksPolicy for details.
@ -7967,6 +8040,8 @@ class V8_EXPORT V8 {
WeakCallbackInfo<void>::Callback weak_callback);
static void MakeWeak(internal::Object*** location_addr);
static void* ClearWeak(internal::Object** location);
static void AnnotateStrongRetainer(internal::Object** location,
const char* label);
static Value* Eternalize(Isolate* isolate, Value* handle);
static void RegisterExternallyReferencedObject(internal::Object** object,
@ -8203,7 +8278,7 @@ class V8_EXPORT TryCatch {
* all TryCatch blocks should be stack allocated because the memory
* location itself is compared against JavaScript try/catch blocks.
*/
TryCatch(Isolate* isolate);
explicit TryCatch(Isolate* isolate);
/**
* Unregisters and deletes this try/catch block.
@ -9174,6 +9249,12 @@ P* PersistentBase<T>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<internal::Object**>(this->val_)));
}
template <class T>
void PersistentBase<T>::AnnotateStrongRetainer(const char* label) {
V8::AnnotateStrongRetainer(reinterpret_cast<internal::Object**>(this->val_),
label);
}
template <class T>
void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) const {
if (IsEmpty()) return;


@ -31,11 +31,6 @@ verifiers {
triggered_by: "v8_linux64_asan_rel_ng"
}
builders { name: "v8_linux64_gcc_compile_dbg" }
builders { name: "v8_linux64_gyp_rel_ng" }
builders {
name: "v8_linux64_gyp_rel_ng_triggered"
triggered_by: "v8_linux64_gyp_rel_ng"
}
builders { name: "v8_linux64_rel_ng" }
builders {
name: "v8_linux64_rel_ng_triggered"
@ -96,10 +91,7 @@ verifiers {
}
builders { name: "v8_node_linux64_rel" }
builders { name: "v8_presubmit" }
builders {
name: "v8_win64_msvc_compile_rel"
experiment_percentage: 20
}
builders { name: "v8_win64_msvc_compile_rel" }
builders { name: "v8_win64_rel_ng" }
builders {
name: "v8_win64_rel_ng_triggered"

File diff suppressed because it is too large


@ -650,10 +650,10 @@ MaybeLocal<String> ReadFile(Isolate* isolate, const string& name) {
size_t size = ftell(file);
rewind(file);
char* chars = new char[size + 1];
chars[size] = '\0';
std::unique_ptr<char> chars(new char[size + 1]);
chars.get()[size] = '\0';
for (size_t i = 0; i < size;) {
i += fread(&chars[i], 1, size - i, file);
i += fread(&chars.get()[i], 1, size - i, file);
if (ferror(file)) {
fclose(file);
return MaybeLocal<String>();
@ -661,8 +661,7 @@ MaybeLocal<String> ReadFile(Isolate* isolate, const string& name) {
}
fclose(file);
MaybeLocal<String> result = String::NewFromUtf8(
isolate, chars, NewStringType::kNormal, static_cast<int>(size));
delete[] chars;
isolate, chars.get(), NewStringType::kNormal, static_cast<int>(size));
return result;
}
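One note on the buffer handling introduced above: std::unique_ptr<char> frees a new[] allocation with plain delete, which is undefined behavior; the array specialization is what this wants. A corrected sketch of those two lines:

#include <memory>

// unique_ptr<char[]> pairs new[] with delete[] and provides operator[],
// so the fread loop can index `chars[i]` without chars.get().
std::unique_ptr<char[]> chars(new char[size + 1]);
chars[size] = '\0';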


@ -1,84 +0,0 @@
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
{
'variables': {
'v8_code': 1,
'v8_enable_i18n_support%': 1,
'v8_toolset_for_shell%': 'target',
},
'includes': ['../gypfiles/toolchain.gypi', '../gypfiles/features.gypi'],
'target_defaults': {
'type': 'executable',
'dependencies': [
'../src/v8.gyp:v8',
'../src/v8.gyp:v8_libbase',
'../src/v8.gyp:v8_libplatform',
],
'include_dirs': [
'..',
],
'conditions': [
['v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icui18n',
'<(icu_gyp_path):icuuc',
],
}],
['OS=="win" and v8_enable_i18n_support==1', {
'dependencies': [
'<(icu_gyp_path):icudata',
],
}],
],
},
'targets': [
{
'target_name': 'v8_shell',
'sources': [
'shell.cc',
],
'conditions': [
[ 'want_separate_host_toolset==1', {
'toolsets': [ '<(v8_toolset_for_shell)', ],
}],
],
},
{
'target_name': 'hello-world',
'sources': [
'hello-world.cc',
],
},
{
'target_name': 'process',
'sources': [
'process.cc',
],
},
],
}


@ -24,6 +24,6 @@ def PostUploadHook(cl, change, output_api):
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
'master.tryserver.chromium.linux:linux_chromium_rel_ng'
'luci.chromium.try:linux_chromium_rel_ng'
],
'Automatically added layout test trybots to run tests on CQ.')


@ -143,6 +143,8 @@ void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }
void* AllocatePages(void* address, size_t size, size_t alignment,
PageAllocator::Permission access) {
DCHECK_EQ(address, AlignedAddress(address, alignment));
DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
result =
@ -160,6 +162,7 @@ void* AllocatePages(void* address, size_t size, size_t alignment,
}
bool FreePages(void* address, const size_t size) {
DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
bool result = GetPageAllocator()->FreePages(address, size);
#if defined(LEAK_SANITIZER)
if (result) {
@ -260,7 +263,9 @@ void VirtualMemory::Free() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
CHECK(FreePages(address, size));
// FreePages expects size to be aligned to allocation granularity. Trimming
// may leave size at only commit granularity. Align it here.
CHECK(FreePages(address, RoundUp(size, AllocatePageSize())));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {


@ -13,13 +13,16 @@
namespace v8 {
namespace internal {
Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
Isolate* isolate = this->isolate();
LOG(isolate, ApiObjectAccess("call", holder()));
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
v8::FunctionCallback f =
v8::ToCData<v8::FunctionCallback>(handler->callback());
if (isolate->needs_side_effect_check() &&
!isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
return Handle<Object>();
}
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);


@ -19,7 +19,8 @@ template <int kArrayLength>
class CustomArgumentsBase : public Relocatable {
public:
virtual inline void IterateInstance(RootVisitor* v) {
v->VisitRootPointers(Root::kRelocatable, values_, values_ + kArrayLength);
v->VisitRootPointers(Root::kRelocatable, nullptr, values_,
values_ + kArrayLength);
}
protected:
@ -215,9 +216,13 @@ class FunctionCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
Handle<Object> Call(FunctionCallback f);
Handle<Object> Call(CallHandlerInfo* handler);
private:
inline JSObject* holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
internal::Object** argv_;
int argc_;
};


@ -285,10 +285,10 @@ MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<NumberDictionary> slow_cache =
Handle<SimpleNumberDictionary> slow_cache =
isolate->slow_template_instantiations_cache();
int entry = slow_cache->FindEntry(serial_number);
if (entry == NumberDictionary::kNotFound) {
if (entry == SimpleNumberDictionary::kNotFound) {
return MaybeHandle<JSObject>();
}
return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
@ -313,9 +313,9 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<NumberDictionary> cache =
Handle<SimpleNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
auto new_cache = NumberDictionary::Set(cache, serial_number, object);
auto new_cache = SimpleNumberDictionary::Set(cache, serial_number, object);
if (*new_cache != *cache) {
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
@ -334,11 +334,11 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<NumberDictionary> cache =
Handle<SimpleNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
DCHECK_NE(NumberDictionary::kNotFound, entry);
cache = NumberDictionary::DeleteEntry(cache, entry);
DCHECK_NE(SimpleNumberDictionary::kNotFound, entry);
cache = SimpleNumberDictionary::DeleteEntry(cache, entry);
isolate->native_context()->set_slow_template_instantiations_cache(*cache);
}
}
@ -726,7 +726,6 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined(isolate)) {
map->set_is_callable(true);
map->set_is_constructor(true);
}
if (immutable_proto) map->set_is_immutable_proto(true);


@ -5,6 +5,8 @@
#ifndef V8_API_NATIVES_H_
#define V8_API_NATIVES_H_
#include "include/v8.h"
#include "src/base/macros.h"
#include "src/handles.h"
#include "src/property-details.h"
@ -62,4 +64,4 @@ class ApiNatives {
} // namespace internal
} // namespace v8
#endif
#endif // V8_API_NATIVES_H_

552 deps/v8/src/api.cc vendored

@ -11,7 +11,6 @@
#include <cmath> // For isnan.
#include <limits>
#include <vector>
#include "include/v8-debug.h"
#include "include/v8-profiler.h"
#include "include/v8-testing.h"
#include "include/v8-util.h"
@ -34,6 +33,7 @@
#include "src/conversions-inl.h"
#include "src/counters.h"
#include "src/debug/debug-coverage.h"
#include "src/debug/debug-evaluate.h"
#include "src/debug/debug-type-profile.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
@ -49,7 +49,6 @@
#include "src/json-stringifier.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/parsing/background-parsing-task.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
@ -461,16 +460,7 @@ void* v8::ArrayBuffer::Allocator::Reserve(size_t length) { UNIMPLEMENTED(); }
void v8::ArrayBuffer::Allocator::Free(void* data, size_t length,
AllocationMode mode) {
switch (mode) {
case AllocationMode::kNormal: {
Free(data, length);
return;
}
case AllocationMode::kReservation: {
UNIMPLEMENTED();
return;
}
}
UNIMPLEMENTED();
}
void v8::ArrayBuffer::Allocator::SetProtection(
@ -483,7 +473,7 @@ namespace {
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* Allocate(size_t length) override {
#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
// Workaround for a GCC bug on AIX
// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
@ -494,7 +484,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return data;
}
virtual void* AllocateUninitialized(size_t length) {
void* AllocateUninitialized(size_t length) override {
#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
// Workaround for a GCC bug on AIX
// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
@ -505,42 +495,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return data;
}
virtual void Free(void* data, size_t) { free(data); }
virtual void* Reserve(size_t length) {
size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
void* address = i::AllocatePages(i::GetRandomMmapAddr(), allocated,
page_size, PageAllocator::kNoAccess);
return address;
}
virtual void Free(void* data, size_t length,
v8::ArrayBuffer::Allocator::AllocationMode mode) {
switch (mode) {
case v8::ArrayBuffer::Allocator::AllocationMode::kNormal: {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(i::FreePages(data, allocated));
return;
}
}
}
virtual void SetProtection(
void* data, size_t length,
v8::ArrayBuffer::Allocator::Protection protection) {
DCHECK(protection == v8::ArrayBuffer::Allocator::Protection::kNoAccess ||
protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite);
PageAllocator::Permission permission =
(protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite)
? PageAllocator::kReadWrite
: PageAllocator::kNoAccess;
CHECK(i::SetPermissions(data, length, permission));
}
void Free(void* data, size_t) override { free(data); }
};
bool RunExtraCode(Isolate* isolate, Local<Context> context,
@ -1069,6 +1024,10 @@ void* V8::ClearWeak(i::Object** location) {
return i::GlobalHandles::ClearWeakness(location);
}
void V8::AnnotateStrongRetainer(i::Object** location, const char* label) {
i::GlobalHandles::AnnotateStrongRetainer(location, label);
}
void V8::DisposeGlobal(i::Object** location) {
i::GlobalHandles::Destroy(location);
}
@ -2069,11 +2028,9 @@ bool ScriptCompiler::ExternalSourceStream::SetBookmark() { return false; }
void ScriptCompiler::ExternalSourceStream::ResetToBookmark() { UNREACHABLE(); }
ScriptCompiler::StreamedSource::StreamedSource(ExternalSourceStream* stream,
Encoding encoding)
: impl_(new i::StreamedSource(stream, encoding)) {}
: impl_(new i::ScriptStreamingData(stream, encoding)) {}
ScriptCompiler::StreamedSource::~StreamedSource() { delete impl_; }
@ -2358,6 +2315,37 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
RETURN_ESCAPED(result);
}
namespace {
i::Compiler::ScriptDetails GetScriptDetails(
i::Isolate* isolate, Local<Value> resource_name,
Local<Integer> resource_line_offset, Local<Integer> resource_column_offset,
Local<Value> source_map_url, Local<PrimitiveArray> host_defined_options) {
i::Compiler::ScriptDetails script_details;
if (!resource_name.IsEmpty()) {
script_details.name_obj = Utils::OpenHandle(*(resource_name));
}
if (!resource_line_offset.IsEmpty()) {
script_details.line_offset =
static_cast<int>(resource_line_offset->Value());
}
if (!resource_column_offset.IsEmpty()) {
script_details.column_offset =
static_cast<int>(resource_column_offset->Value());
}
script_details.host_defined_options = isolate->factory()->empty_fixed_array();
if (!host_defined_options.IsEmpty()) {
script_details.host_defined_options =
Utils::OpenHandle(*(host_defined_options));
}
if (!source_map_url.IsEmpty()) {
script_details.source_map_url = Utils::OpenHandle(*(source_map_url));
}
return script_details;
}
} // namespace
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
Isolate* v8_isolate, Source* source, CompileOptions options,
NoCacheReason no_cache_reason) {
@ -2366,17 +2354,21 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
ENTER_V8_NO_SCRIPT(isolate, v8_isolate->GetCurrentContext(), ScriptCompiler,
CompileUnbound, MaybeLocal<UnboundScript>(),
InternalEscapableScope);
bool produce_cache = options == kProduceParserCache ||
options == kProduceCodeCache ||
options == kProduceFullCodeCache;
// Don't try to produce any kind of cache when the debugger is loaded.
if (isolate->debug()->is_loaded() && produce_cache) {
// ProduceParserCache, ProduceCodeCache, ProduceFullCodeCache and
// ConsumeParserCache are not supported. They are present only for
// backward compatibility. All these options behave as kNoCompileOptions.
if (options == kConsumeParserCache) {
// We do not support parser caches anymore. Just set cached_data to
// rejected to signal an error.
options = kNoCompileOptions;
source->cached_data->rejected = true;
} else if (options == kProduceParserCache || options == kProduceCodeCache ||
options == kProduceFullCodeCache) {
options = kNoCompileOptions;
}
i::ScriptData* script_data = nullptr;
if (options == kConsumeParserCache || options == kConsumeCodeCache) {
if (options == kConsumeCodeCache) {
DCHECK(source->cached_data);
// ScriptData takes care of pointer-aligning the data.
script_data = new i::ScriptData(source->cached_data->data,
@ -2386,32 +2378,14 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
i::Handle<i::SharedFunctionInfo> result;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
i::MaybeHandle<i::Object> name_obj;
i::MaybeHandle<i::Object> source_map_url;
i::MaybeHandle<i::FixedArray> host_defined_options =
isolate->factory()->empty_fixed_array();
int line_offset = 0;
int column_offset = 0;
if (!source->resource_name.IsEmpty()) {
name_obj = Utils::OpenHandle(*(source->resource_name));
}
if (!source->host_defined_options.IsEmpty()) {
host_defined_options = Utils::OpenHandle(*(source->host_defined_options));
}
if (!source->resource_line_offset.IsEmpty()) {
line_offset = static_cast<int>(source->resource_line_offset->Value());
}
if (!source->resource_column_offset.IsEmpty()) {
column_offset = static_cast<int>(source->resource_column_offset->Value());
}
if (!source->source_map_url.IsEmpty()) {
source_map_url = Utils::OpenHandle(*(source->source_map_url));
}
i::Compiler::ScriptDetails script_details = GetScriptDetails(
isolate, source->resource_name, source->resource_line_offset,
source->resource_column_offset, source->source_map_url,
source->host_defined_options);
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
str, name_obj, line_offset, column_offset, source->resource_options,
source_map_url, isolate->native_context(), nullptr, &script_data,
options, no_cache_reason, i::NOT_NATIVES_CODE, host_defined_options);
str, script_details, source->resource_options, nullptr, &script_data,
options, no_cache_reason, i::NOT_NATIVES_CODE);
has_pending_exception = !maybe_function_info.ToHandle(&result);
if (has_pending_exception && script_data != nullptr) {
// This case won't happen during normal operation; we have compiled
@ -2422,13 +2396,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
}
RETURN_ON_FAILED_EXECUTION(UnboundScript);
if (produce_cache && script_data != nullptr) {
// script_data now contains the data that was generated. source will
// take the ownership.
source->cached_data = new CachedData(
script_data->data(), script_data->length(), CachedData::BufferOwned);
script_data->ReleaseDataOwnership();
} else if (options == kConsumeParserCache || options == kConsumeCodeCache) {
if (options == kConsumeCodeCache) {
source->cached_data->rejected = script_data->rejected();
}
delete script_data;
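To make the consume-side semantics concrete, a hedged sketch (the `data`/`length` cache bytes and the `code`/`origin` values are assumed from earlier runs):

  // The deprecated kProduce*/kConsumeParserCache options now degrade to
  // kNoCompileOptions, so only kConsumeCodeCache does real cache work.
  auto* cached = new v8::ScriptCompiler::CachedData(data, length);
  v8::ScriptCompiler::Source source(code, origin, cached);  // owned by source
  v8::MaybeLocal<v8::UnboundScript> unbound =
      v8::ScriptCompiler::CompileUnboundScript(
          isolate, &source, v8::ScriptCompiler::kConsumeCodeCache);
  if (source.GetCachedData()->rejected) {
    // Stale or mismatched cache: V8 fell back to a full parse and compile.
  }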
@ -2593,9 +2561,11 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
if (!i::FLAG_script_streaming) {
return nullptr;
}
// We don't support other compile options on streaming background compiles.
// TODO(rmcilroy): remove CompileOptions from the API.
CHECK(options == ScriptCompiler::kNoCompileOptions);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
return new i::BackgroundParsingTask(source->impl(), options,
i::FLAG_stack_size, isolate);
return i::Compiler::NewBackgroundCompileTask(source->impl(), isolate);
}
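Only kNoCompileOptions survives the CHECK above; a hedged sketch of the intended flow, with the threading glue and the `stream` ExternalSourceStream assumed:

  v8::ScriptCompiler::StreamedSource streamed(
      stream, v8::ScriptCompiler::StreamedSource::UTF8);
  v8::ScriptCompiler::ScriptStreamingTask* task =
      v8::ScriptCompiler::StartStreamingScript(isolate, &streamed);
  // task->Run() is meant to execute on a background thread; afterwards,
  // back on the main thread:
  v8::MaybeLocal<v8::Script> script =
      v8::ScriptCompiler::Compile(context, &streamed, full_source, origin);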
@ -2605,59 +2575,24 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
const ScriptOrigin& origin) {
PREPARE_FOR_EXECUTION(context, ScriptCompiler, Compile, Script);
TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.ScriptCompiler");
i::StreamedSource* source = v8_source->impl();
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
i::Handle<i::Script> script = isolate->factory()->NewScript(str);
if (isolate->NeedsSourcePositionsForProfiling()) {
i::Script::InitLineEnds(script);
}
if (!origin.ResourceName().IsEmpty()) {
script->set_name(*Utils::OpenHandle(*(origin.ResourceName())));
}
if (!origin.HostDefinedOptions().IsEmpty()) {
script->set_host_defined_options(
*Utils::OpenHandle(*(origin.HostDefinedOptions())));
}
if (!origin.ResourceLineOffset().IsEmpty()) {
script->set_line_offset(
static_cast<int>(origin.ResourceLineOffset()->Value()));
}
if (!origin.ResourceColumnOffset().IsEmpty()) {
script->set_column_offset(
static_cast<int>(origin.ResourceColumnOffset()->Value()));
}
script->set_origin_options(origin.Options());
if (!origin.SourceMapUrl().IsEmpty()) {
script->set_source_mapping_url(
*Utils::OpenHandle(*(origin.SourceMapUrl())));
}
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileStreamedScript");
source->info->set_script(script);
source->parser->UpdateStatistics(isolate, script);
source->info->UpdateBackgroundParseStatisticsOnMainThread(isolate);
source->parser->HandleSourceURLComments(isolate, script);
i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
i::Compiler::ScriptDetails script_details = GetScriptDetails(
isolate, origin.ResourceName(), origin.ResourceLineOffset(),
origin.ResourceColumnOffset(), origin.SourceMapUrl(),
origin.HostDefinedOptions());
i::ScriptStreamingData* streaming_data = v8_source->impl();
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForStreamedScript(
str, script_details, origin.Options(), streaming_data);
i::Handle<i::SharedFunctionInfo> result;
if (source->info->literal() == nullptr) {
// Parsing has failed - report error messages.
source->info->pending_error_handler()->ReportErrors(
isolate, script, source->info->ast_value_factory());
} else {
// Parsing has succeeded - finalize compile.
if (i::FLAG_background_compile) {
result = i::Compiler::GetSharedFunctionInfoForBackgroundCompile(
script, source->info.get(), str->length(),
source->outer_function_job.get(), &source->inner_function_jobs);
} else {
result = i::Compiler::GetSharedFunctionInfoForStreamedScript(
script, source->info.get(), str->length());
}
}
has_pending_exception = result.is_null();
has_pending_exception = !maybe_function_info.ToHandle(&result);
if (has_pending_exception) isolate->ReportPendingMessages();
source->Release();
RETURN_ON_FAILED_EXECUTION(Script);
Local<UnboundScript> generic = ToApiHandle<UnboundScript>(result);
@ -3304,6 +3239,16 @@ MaybeLocal<WasmCompiledModule> ValueDeserializer::Delegate::GetWasmModuleFromId(
return MaybeLocal<WasmCompiledModule>();
}
MaybeLocal<SharedArrayBuffer>
ValueDeserializer::Delegate::GetSharedArrayBufferFromId(Isolate* v8_isolate,
uint32_t id) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
isolate->ScheduleThrow(*isolate->factory()->NewError(
isolate->error_function(),
i::MessageTemplate::kDataCloneDeserializationError));
return MaybeLocal<SharedArrayBuffer>();
}
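The default above just schedules a DataCloneError; embedders that transfer SharedArrayBuffers override the hook. A minimal sketch, with the id-to-buffer table assumed:

  #include <map>

  class SharedABDelegate : public v8::ValueDeserializer::Delegate {
   public:
    v8::MaybeLocal<v8::SharedArrayBuffer> GetSharedArrayBufferFromId(
        v8::Isolate* isolate, uint32_t clone_id) override {
      // clone_id was recorded by the matching ValueSerializer delegate;
      // returning an empty MaybeLocal reproduces the default error path.
      auto it = shared_buffers_.find(clone_id);
      if (it == shared_buffers_.end())
        return v8::MaybeLocal<v8::SharedArrayBuffer>();
      return it->second.Get(isolate);
    }
   private:
    std::map<uint32_t, v8::Global<v8::SharedArrayBuffer>> shared_buffers_;
  };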
struct ValueDeserializer::PrivateData {
PrivateData(i::Isolate* i, i::Vector<const uint8_t> data, Delegate* delegate)
: isolate(i), deserializer(i, data, delegate) {}
@ -3544,24 +3489,22 @@ bool Value::IsWebAssemblyCompiledModule() const {
js_obj->map()->GetConstructor();
}
#define VALUE_IS_SPECIFIC_TYPE(Type, Class) \
bool Value::Is##Type() const { \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
if (!obj->IsHeapObject()) return false; \
i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate(); \
return obj->HasSpecificClassOf(isolate->heap()->Class##_string()); \
#define VALUE_IS_SPECIFIC_TYPE(Type, Check) \
bool Value::Is##Type() const { \
i::Handle<i::Object> obj = Utils::OpenHandle(this); \
return obj->Is##Check(); \
}
VALUE_IS_SPECIFIC_TYPE(ArgumentsObject, Arguments)
VALUE_IS_SPECIFIC_TYPE(BooleanObject, Boolean)
VALUE_IS_SPECIFIC_TYPE(NumberObject, Number)
VALUE_IS_SPECIFIC_TYPE(StringObject, String)
VALUE_IS_SPECIFIC_TYPE(SymbolObject, Symbol)
VALUE_IS_SPECIFIC_TYPE(Date, Date)
VALUE_IS_SPECIFIC_TYPE(Map, Map)
VALUE_IS_SPECIFIC_TYPE(Set, Set)
VALUE_IS_SPECIFIC_TYPE(WeakMap, WeakMap)
VALUE_IS_SPECIFIC_TYPE(WeakSet, WeakSet)
VALUE_IS_SPECIFIC_TYPE(ArgumentsObject, JSArgumentsObject)
VALUE_IS_SPECIFIC_TYPE(BooleanObject, BooleanWrapper)
VALUE_IS_SPECIFIC_TYPE(NumberObject, NumberWrapper)
VALUE_IS_SPECIFIC_TYPE(StringObject, StringWrapper)
VALUE_IS_SPECIFIC_TYPE(SymbolObject, SymbolWrapper)
VALUE_IS_SPECIFIC_TYPE(Date, JSDate)
VALUE_IS_SPECIFIC_TYPE(Map, JSMap)
VALUE_IS_SPECIFIC_TYPE(Set, JSSet)
VALUE_IS_SPECIFIC_TYPE(WeakMap, JSWeakMap)
VALUE_IS_SPECIFIC_TYPE(WeakSet, JSWeakSet)
#undef VALUE_IS_SPECIFIC_TYPE
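One expansion of the rewritten macro, for the Date row: the per-call isolate lookup and class-name string comparison are gone in favor of a direct instance-type predicate.

  bool Value::IsDate() const {
    i::Handle<i::Object> obj = Utils::OpenHandle(this);
    return obj->IsJSDate();
  }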
@ -3953,55 +3896,36 @@ void v8::SharedArrayBuffer::CheckCast(Value* that) {
void v8::Date::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
i::Isolate* isolate = nullptr;
if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
Utils::ApiCheck(isolate != nullptr &&
obj->HasSpecificClassOf(isolate->heap()->Date_string()),
"v8::Date::Cast()", "Could not convert to date");
Utils::ApiCheck(obj->IsJSDate(), "v8::Date::Cast()",
"Could not convert to date");
}
void v8::StringObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
i::Isolate* isolate = nullptr;
if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
Utils::ApiCheck(isolate != nullptr &&
obj->HasSpecificClassOf(isolate->heap()->String_string()),
"v8::StringObject::Cast()",
Utils::ApiCheck(obj->IsStringWrapper(), "v8::StringObject::Cast()",
"Could not convert to StringObject");
}
void v8::SymbolObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
i::Isolate* isolate = nullptr;
if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
Utils::ApiCheck(isolate != nullptr &&
obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
"v8::SymbolObject::Cast()",
Utils::ApiCheck(obj->IsSymbolWrapper(), "v8::SymbolObject::Cast()",
"Could not convert to SymbolObject");
}
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
i::Isolate* isolate = nullptr;
if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
Utils::ApiCheck(isolate != nullptr &&
obj->HasSpecificClassOf(isolate->heap()->Number_string()),
"v8::NumberObject::Cast()",
Utils::ApiCheck(obj->IsNumberWrapper(), "v8::NumberObject::Cast()",
"Could not convert to NumberObject");
}
void v8::BooleanObject::CheckCast(v8::Value* that) {
i::Handle<i::Object> obj = Utils::OpenHandle(that);
i::Isolate* isolate = nullptr;
if (obj->IsHeapObject()) isolate = i::HeapObject::cast(*obj)->GetIsolate();
Utils::ApiCheck(
isolate != nullptr &&
obj->HasSpecificClassOf(isolate->heap()->Boolean_string()),
"v8::BooleanObject::Cast()", "Could not convert to BooleanObject");
Utils::ApiCheck(obj->IsBooleanWrapper(), "v8::BooleanObject::Cast()",
"Could not convert to BooleanObject");
}
@ -4432,7 +4356,7 @@ Maybe<bool> v8::Object::SetPrivate(Local<Context> context, Local<Private> key,
desc.set_enumerable(false);
desc.set_configurable(true);
desc.set_value(value_obj);
return i::JSProxy::SetPrivateProperty(
return i::JSProxy::SetPrivateSymbol(
isolate, i::Handle<i::JSProxy>::cast(self),
i::Handle<i::Symbol>::cast(key_obj), &desc, i::kDontThrow);
}
@ -4577,10 +4501,10 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context) {
v8::IndexFilter::kIncludeIndices);
}
MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context,
KeyCollectionMode mode,
PropertyFilter property_filter,
IndexFilter index_filter) {
MaybeLocal<Array> v8::Object::GetPropertyNames(
Local<Context> context, KeyCollectionMode mode,
PropertyFilter property_filter, IndexFilter index_filter,
KeyConversionMode key_conversion) {
PREPARE_FOR_EXECUTION(context, Object, GetPropertyNames, Array);
auto self = Utils::OpenHandle(this);
i::Handle<i::FixedArray> value;
@ -4590,7 +4514,8 @@ MaybeLocal<Array> v8::Object::GetPropertyNames(Local<Context> context,
accumulator.set_skip_indices(index_filter == IndexFilter::kSkipIndices);
has_pending_exception = accumulator.CollectKeys(self, self).IsNothing();
RETURN_ON_FAILED_EXECUTION(Array);
value = accumulator.GetKeys(i::GetKeysConversion::kKeepNumbers);
value =
accumulator.GetKeys(static_cast<i::GetKeysConversion>(key_conversion));
DCHECK(self->map()->EnumLength() == i::kInvalidEnumCacheSentinel ||
self->map()->EnumLength() == 0 ||
self->map()->instance_descriptors()->GetEnumCache()->keys() != *value);
@ -4614,10 +4539,11 @@ Local<Array> v8::Object::GetOwnPropertyNames() {
RETURN_TO_LOCAL_UNCHECKED(GetOwnPropertyNames(context), Array);
}
MaybeLocal<Array> v8::Object::GetOwnPropertyNames(Local<Context> context,
PropertyFilter filter) {
MaybeLocal<Array> v8::Object::GetOwnPropertyNames(
Local<Context> context, PropertyFilter filter,
KeyConversionMode key_conversion) {
return GetPropertyNames(context, KeyCollectionMode::kOwnOnly, filter,
v8::IndexFilter::kIncludeIndices);
v8::IndexFilter::kIncludeIndices, key_conversion);
}
MaybeLocal<String> v8::Object::ObjectProtoToString(Local<Context> context) {
@ -4754,14 +4680,14 @@ Maybe<bool> v8::Object::Has(Local<Context> context, uint32_t index) {
return maybe;
}
template <typename Getter, typename Setter, typename Data>
static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
Local<Name> name, Getter getter,
Setter setter, Data data,
AccessControl settings,
PropertyAttribute attributes,
bool is_special_data_property) {
bool is_special_data_property,
bool replace_on_access) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8_NO_SCRIPT(isolate, context, Object, SetAccessor, Nothing<bool>(),
i::HandleScope);
@ -4771,7 +4697,7 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
v8::Local<AccessorSignature> signature;
i::Handle<i::AccessorInfo> info =
MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature,
is_special_data_property, false);
is_special_data_property, replace_on_access);
if (info.is_null()) return Nothing<bool>();
bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
@ -4797,7 +4723,7 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
PropertyAttribute attribute) {
return ObjectSetAccessor(context, this, name, getter, setter,
data.FromMaybe(Local<Value>()), settings, attribute,
i::FLAG_disable_old_api_accessors);
i::FLAG_disable_old_api_accessors, false);
}
@ -4827,7 +4753,17 @@ Maybe<bool> Object::SetNativeDataProperty(v8::Local<v8::Context> context,
v8::Local<Value> data,
PropertyAttribute attributes) {
return ObjectSetAccessor(context, this, name, getter, setter, data, DEFAULT,
attributes, true);
attributes, true, false);
}
Maybe<bool> Object::SetLazyDataProperty(v8::Local<v8::Context> context,
v8::Local<Name> name,
AccessorNameGetterCallback getter,
v8::Local<Value> data,
PropertyAttribute attributes) {
return ObjectSetAccessor(context, this, name, getter,
static_cast<AccessorNameSetterCallback>(nullptr),
data, DEFAULT, attributes, true, true);
}
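A hedged usage sketch (ExpensiveComputation is an assumed helper; data and attributes take their defaults): the getter is intended to run once, after which replace_on_access installs the result as a plain data property.

  bool ok =
      obj->SetLazyDataProperty(
             context,
             v8::String::NewFromUtf8(isolate, "stats",
                                     v8::NewStringType::kNormal)
                 .ToLocalChecked(),
             [](v8::Local<v8::Name> name,
                const v8::PropertyCallbackInfo<v8::Value>& info) {
               info.GetReturnValue().Set(
                   ExpensiveComputation(info.GetIsolate()));
             })
          .FromJust();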
Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
@ -7352,13 +7288,11 @@ Local<Array> Set::AsArray() const {
MaybeLocal<Promise::Resolver> Promise::Resolver::New(Local<Context> context) {
PREPARE_FOR_EXECUTION(context, Promise_Resolver, New, Resolver);
i::Handle<i::Object> result;
Local<Promise::Resolver> result;
has_pending_exception =
!i::Execution::Call(isolate, isolate->promise_internal_constructor(),
isolate->factory()->undefined_value(), 0, nullptr)
.ToHandle(&result);
!ToLocal<Promise::Resolver>(isolate->factory()->NewJSPromise(), &result);
RETURN_ON_FAILED_EXECUTION(Promise::Resolver);
RETURN_ESCAPED(Local<Promise::Resolver>::Cast(Utils::ToLocal(result)));
RETURN_ESCAPED(result);
}
@ -7380,12 +7314,14 @@ Maybe<bool> Promise::Resolver::Resolve(Local<Context> context,
ENTER_V8(isolate, context, Promise_Resolver, Resolve, Nothing<bool>(),
i::HandleScope);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value)};
auto promise = i::Handle<i::JSPromise>::cast(self);
if (promise->status() != Promise::kPending) {
return Just(true);
}
has_pending_exception =
i::Execution::Call(isolate, isolate->promise_resolve(),
isolate->factory()->undefined_value(), arraysize(argv),
argv)
.is_null();
i::JSPromise::Resolve(promise, Utils::OpenHandle(*value)).is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
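The early return above makes settling idempotent at the API boundary; a sketch, with `value` and `other` assumed locals:

  v8::Local<v8::Promise::Resolver> resolver =
      v8::Promise::Resolver::New(context).ToLocalChecked();
  v8::Local<v8::Promise> promise = resolver->GetPromise();
  resolver->Resolve(context, value).FromJust();  // settles the promise
  resolver->Resolve(context, other).FromJust();  // now a no-op, still Just(true)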
@ -7403,15 +7339,14 @@ Maybe<bool> Promise::Resolver::Reject(Local<Context> context,
ENTER_V8(isolate, context, Promise_Resolver, Reject, Nothing<bool>(),
i::HandleScope);
auto self = Utils::OpenHandle(this);
auto promise = i::Handle<i::JSPromise>::cast(self);
if (promise->status() != Promise::kPending) {
return Just(true);
}
// We pass true to trigger the debugger's on exception handler.
i::Handle<i::Object> argv[] = {self, Utils::OpenHandle(*value),
isolate->factory()->ToBoolean(true)};
has_pending_exception =
i::Execution::Call(isolate, isolate->promise_internal_reject(),
isolate->factory()->undefined_value(), arraysize(argv),
argv)
.is_null();
i::JSPromise::Reject(promise, Utils::OpenHandle(*value)).is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
}
@ -7605,8 +7540,9 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
if (!i::wasm::IsWasmCodegenAllowed(i_isolate, i_isolate->native_context())) {
return MaybeLocal<WasmCompiledModule>();
}
i::MaybeHandle<i::JSObject> maybe_compiled = i::wasm::SyncCompile(
i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
i::MaybeHandle<i::JSObject> maybe_compiled =
i_isolate->wasm_engine()->SyncCompile(
i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
return Local<WasmCompiledModule>::Cast(
Utils::ToLocal(maybe_compiled.ToHandleChecked()));
@ -7665,30 +7601,29 @@ void WasmModuleObjectBuilderStreaming::Finish() {
}
// AsyncCompile makes its own copy of the wire bytes. This inefficiency
// will be resolved when we move to true streaming compilation.
i::wasm::AsyncCompile(reinterpret_cast<i::Isolate*>(isolate_),
Utils::OpenHandle(*promise_.Get(isolate_)),
{wire_bytes.get(), wire_bytes.get() + total_size_},
false);
auto i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
i_isolate->wasm_engine()->AsyncCompile(
i_isolate, Utils::OpenHandle(*promise_.Get(isolate_)),
{wire_bytes.get(), wire_bytes.get() + total_size_}, false);
}
void WasmModuleObjectBuilderStreaming::Abort(Local<Value> exception) {
void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal<Value> exception) {
Local<Promise> promise = GetPromise();
// The promise has already been resolved, e.g. because of a compilation
// error.
if (promise->State() != v8::Promise::kPending) return;
if (i::FLAG_wasm_stream_compilation) streaming_decoder_->Abort();
// If there is no exception, then we do not reject the promise. The reason is
// that 'no exception' indicates that we are in a ScriptForbiddenScope, which
// means that it is not allowed to reject the promise at the moment, or
// execute any other JavaScript code.
// If no exception value is provided, we do not reject the promise. This can
// happen when streaming compilation is aborted at a point where script
// execution is no longer allowed, e.g. when a browser tab gets refreshed.
if (exception.IsEmpty()) return;
Local<Promise::Resolver> resolver = promise.As<Promise::Resolver>();
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate_);
i::HandleScope scope(i_isolate);
Local<Context> context = Utils::ToLocal(handle(i_isolate->context()));
auto maybe = resolver->Reject(context, exception);
auto maybe = resolver->Reject(context, exception.ToLocalChecked());
CHECK_IMPLIES(!maybe.FromMaybe(false), i_isolate->has_scheduled_exception());
}
@ -7696,29 +7631,6 @@ WasmModuleObjectBuilderStreaming::~WasmModuleObjectBuilderStreaming() {
promise_.Reset();
}
void WasmModuleObjectBuilder::OnBytesReceived(const uint8_t* bytes,
size_t size) {
std::unique_ptr<uint8_t[]> cloned_bytes(new uint8_t[size]);
memcpy(cloned_bytes.get(), bytes, size);
received_buffers_.push_back(
Buffer(std::unique_ptr<const uint8_t[]>(
const_cast<const uint8_t*>(cloned_bytes.release())),
size));
total_size_ += size;
}
MaybeLocal<WasmCompiledModule> WasmModuleObjectBuilder::Finish() {
std::unique_ptr<uint8_t[]> wire_bytes(new uint8_t[total_size_]);
uint8_t* insert_at = wire_bytes.get();
for (size_t i = 0; i < received_buffers_.size(); ++i) {
const Buffer& buff = received_buffers_[i];
memcpy(insert_at, buff.first.get(), buff.second);
insert_at += buff.second;
}
return WasmCompiledModule::Compile(isolate_, wire_bytes.get(), total_size_);
}
// static
v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
return new ArrayBufferAllocator();
@ -8690,24 +8602,20 @@ void Isolate::RunMicrotasks() {
reinterpret_cast<i::Isolate*>(this)->RunMicrotasks();
}
void Isolate::EnqueueMicrotask(Local<Function> microtask) {
void Isolate::EnqueueMicrotask(Local<Function> function) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->EnqueueMicrotask(Utils::OpenHandle(*microtask));
i::Handle<i::CallableTask> microtask = isolate->factory()->NewCallableTask(
Utils::OpenHandle(*function), isolate->native_context());
isolate->EnqueueMicrotask(microtask);
}
void Isolate::EnqueueMicrotask(MicrotaskCallback microtask, void* data) {
void Isolate::EnqueueMicrotask(MicrotaskCallback callback, void* data) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::HandleScope scope(isolate);
i::Handle<i::CallHandlerInfo> callback_info =
i::Handle<i::CallHandlerInfo>::cast(
isolate->factory()->NewStruct(i::TUPLE3_TYPE, i::NOT_TENURED));
SET_FIELD_WRAPPED(callback_info, set_callback, microtask);
SET_FIELD_WRAPPED(callback_info, set_js_callback,
callback_info->redirected_callback());
SET_FIELD_WRAPPED(callback_info, set_data, data);
isolate->EnqueueMicrotask(callback_info);
i::Handle<i::CallbackTask> microtask = isolate->factory()->NewCallbackTask(
isolate->factory()->NewForeign(reinterpret_cast<i::Address>(callback)),
isolate->factory()->NewForeign(reinterpret_cast<i::Address>(data)));
isolate->EnqueueMicrotask(microtask);
}
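Both overloads now land on the same internal microtask queue; a sketch of the callback variant:

  static void OnTick(void* data) {
    ++*static_cast<int*>(data);  // assumed embedder-side counter
  }
  // ...
  int ticks = 0;
  isolate->EnqueueMicrotask(OnTick, &ticks);
  isolate->RunMicrotasks();  // runs pending microtasks, including OnTick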
@ -8806,6 +8714,12 @@ void Isolate::LowMemoryNotification() {
int Isolate::ContextDisposedNotification(bool dependant_context) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (!dependant_context) {
// We left the current context, so we can abort all running WebAssembly
// compilations.
isolate->wasm_engine()->compilation_manager()->AbortAllJobs();
}
// TODO(ahaas): move other non-heap activity out of the heap call.
return isolate->heap()->NotifyContextDisposed(dependant_context);
}
@ -9139,78 +9053,6 @@ Local<StackTrace> Exception::GetStackTrace(Local<Value> exception) {
// --- D e b u g S u p p o r t ---
bool Debug::SetDebugEventListener(Isolate* isolate, EventCallback that,
Local<Value> data) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
if (that == nullptr) {
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::HandleScope scope(i_isolate);
i_isolate->debug()->SetDebugDelegate(nullptr, false);
} else {
// Might create the Debug context.
ENTER_V8_FOR_NEW_CONTEXT(i_isolate);
i::HandleScope scope(i_isolate);
i::Handle<i::Object> i_data = i_isolate->factory()->undefined_value();
if (!data.IsEmpty()) i_data = Utils::OpenHandle(*data);
i::NativeDebugDelegate* delegate =
new i::NativeDebugDelegate(i_isolate, that, i_data);
i_isolate->debug()->SetDebugDelegate(delegate, true);
}
return true;
}
void Debug::DebugBreak(Isolate* isolate) { debug::DebugBreak(isolate); }
void Debug::CancelDebugBreak(Isolate* isolate) {
debug::CancelDebugBreak(isolate);
}
bool Debug::CheckDebugBreak(Isolate* isolate) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
return internal_isolate->stack_guard()->CheckDebugBreak();
}
void Debug::SetMessageHandler(Isolate* isolate,
v8::Debug::MessageHandler handler) {}
void Debug::SendCommand(Isolate* isolate, const uint16_t* command, int length,
ClientData* client_data) {}
MaybeLocal<Value> Debug::Call(Local<Context> context,
v8::Local<v8::Function> fun,
v8::Local<v8::Value> data) {
return debug::Call(context, fun, data);
}
void Debug::ProcessDebugMessages(Isolate* isolate) {}
Local<Context> Debug::GetDebugContext(Isolate* isolate) {
return debug::GetDebugContext(isolate);
}
MaybeLocal<Context> Debug::GetDebuggedContext(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
if (!i_isolate->debug()->in_debug_scope()) return MaybeLocal<Context>();
i::Handle<i::Object> calling = i_isolate->GetCallingNativeContext();
if (calling.is_null()) return MaybeLocal<Context>();
return Utils::ToLocal(i::Handle<i::Context>::cast(calling));
}
void Debug::SetLiveEditEnabled(Isolate* isolate, bool enable) {
debug::SetLiveEditEnabled(isolate, enable);
}
bool Debug::IsTailCallEliminationEnabled(Isolate* isolate) { return false; }
void Debug::SetTailCallEliminationEnabled(Isolate* isolate, bool enabled) {
}
MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
Local<Value> value) {
return debug::GetInternalProperties(v8_isolate, value);
}
void debug::SetContextId(Local<Context> context, int id) {
Utils::OpenHandle(*context)->set_debug_context_id(i::Smi::FromInt(id));
}
@ -9653,13 +9495,11 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
ScriptOriginOptions origin_options;
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForScript(
str, i::MaybeHandle<i::Object>(), 0, 0, origin_options,
i::MaybeHandle<i::Object>(), isolate->native_context(), nullptr,
str, i::Compiler::ScriptDetails(), origin_options, nullptr,
&script_data, ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseInspector,
i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
: i::INSPECTOR_CODE,
i::MaybeHandle<i::FixedArray>());
: i::INSPECTOR_CODE);
has_pending_exception = !maybe_function_info.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(UnboundScript);
}
@ -9836,6 +9676,18 @@ v8::Local<debug::GeneratorObject> debug::GeneratorObject::Cast(
return ToApiHandle<debug::GeneratorObject>(Utils::OpenHandle(*value));
}
MaybeLocal<v8::Value> debug::EvaluateGlobal(v8::Isolate* isolate,
v8::Local<v8::String> source) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
PREPARE_FOR_DEBUG_INTERFACE_EXECUTION_WITH_ISOLATE(internal_isolate, Value);
Local<Value> result;
has_pending_exception = !ToLocal<Value>(
i::DebugEvaluate::Global(internal_isolate, Utils::OpenHandle(*source)),
&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
void debug::QueryObjects(v8::Local<v8::Context> v8_context,
QueryObjectPredicate* predicate,
PersistentValueVector<v8::Object>* objects) {
@ -10464,6 +10316,12 @@ void HeapProfiler::SetGetRetainerInfosCallback(
callback);
}
void HeapProfiler::SetBuildEmbedderGraphCallback(
BuildEmbedderGraphCallback callback) {
reinterpret_cast<i::HeapProfiler*>(this)->SetBuildEmbedderGraphCallback(
callback);
}
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
@ -10527,7 +10385,7 @@ void Testing::PrepareStressRun(int run) {
void Testing::DeoptimizeAll(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::HandleScope scope(i_isolate);
internal::Deoptimizer::DeoptimizeAll(i_isolate);
i::Deoptimizer::DeoptimizeAll(i_isolate);
}
@ -10571,14 +10429,15 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
if (last_handle_before_deferred_block_ != nullptr &&
(last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) &&
(last_handle_before_deferred_block_ >= block)) {
v->VisitRootPointers(Root::kHandleScope, block,
v->VisitRootPointers(Root::kHandleScope, nullptr, block,
last_handle_before_deferred_block_);
DCHECK(!found_block_before_deferred);
#ifdef DEBUG
found_block_before_deferred = true;
#endif
} else {
v->VisitRootPointers(Root::kHandleScope, block, &block[kHandleBlockSize]);
v->VisitRootPointers(Root::kHandleScope, nullptr, block,
&block[kHandleBlockSize]);
}
}
@ -10587,7 +10446,7 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
// Iterate over live handles in the last block (if any).
if (!blocks()->empty()) {
v->VisitRootPointers(Root::kHandleScope, blocks()->back(),
v->VisitRootPointers(Root::kHandleScope, nullptr, blocks()->back(),
handle_scope_data_.next);
}
@ -10596,11 +10455,11 @@ void HandleScopeImplementer::IterateThis(RootVisitor* v) {
for (unsigned i = 0; i < arraysize(context_lists); i++) {
if (context_lists[i]->empty()) continue;
Object** start = reinterpret_cast<Object**>(&context_lists[i]->front());
v->VisitRootPointers(Root::kHandleScope, start,
v->VisitRootPointers(Root::kHandleScope, nullptr, start,
start + context_lists[i]->size());
}
if (microtask_context_) {
v->VisitRootPointer(Root::kHandleScope,
v->VisitRootPointer(Root::kHandleScope, nullptr,
reinterpret_cast<Object**>(&microtask_context_));
}
}
@ -10670,10 +10529,11 @@ void DeferredHandles::Iterate(RootVisitor* v) {
DCHECK((first_block_limit_ >= blocks_.front()) &&
(first_block_limit_ <= &(blocks_.front())[kHandleBlockSize]));
v->VisitRootPointers(Root::kHandleScope, blocks_.front(), first_block_limit_);
v->VisitRootPointers(Root::kHandleScope, nullptr, blocks_.front(),
first_block_limit_);
for (size_t i = 1; i < blocks_.size(); i++) {
v->VisitRootPointers(Root::kHandleScope, blocks_[i],
v->VisitRootPointers(Root::kHandleScope, nullptr, blocks_[i],
&blocks_[i][kHandleBlockSize]);
}
}

deps/v8/src/api.h vendored

@ -180,6 +180,10 @@ class Utils {
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<Float64Array> ToLocalFloat64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<BigInt64Array> ToLocalBigInt64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<BigUint64Array> ToLocalBigUint64Array(
v8::internal::Handle<v8::internal::JSTypedArray> obj);
static inline Local<SharedArrayBuffer> ToLocalShared(
v8::internal::Handle<v8::internal::JSArrayBuffer> obj);


@ -46,7 +46,7 @@
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(NEON); }
@ -109,7 +109,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@ -143,27 +143,27 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
void RelocInfo::WipeOut(Isolate* isolate) {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
@ -189,7 +189,7 @@ Operand::Operand(const ExternalReference& f)
value_.immediate = reinterpret_cast<int32_t>(f.address());
}
Operand::Operand(Smi* value) : rmode_(RelocInfo::NONE32) {
Operand::Operand(Smi* value) : rmode_(RelocInfo::NONE) {
value_.immediate = reinterpret_cast<intptr_t>(value);
}
@ -273,15 +273,13 @@ Address Assembler::return_address_from_call_start(Address pc) {
}
}
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
Address constant_pool_entry, Code* code, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
@ -329,17 +327,15 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
}
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address constant_pool, Address target,
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
DCHECK_IMPLIES(isolate == nullptr, icache_flush_mode == SKIP_ICACHE_FLUSH);
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Update the entry in the constant pool.
Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
// Assembler::FlushICache(isolate, pc, sizeof(target));
// Assembler::FlushICache(pc, sizeof(target));
// However, on ARM, no instruction is actually patched in the case
// of embedded constants of the form:
// ldr ip, [pp, #...]
@ -357,7 +353,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 2 * kInstrSize);
Assembler::FlushICache(pc, 2 * kInstrSize);
}
} else {
// This is an mov / orr immediate load. Patch the immediate embedded in
@ -377,13 +373,42 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 4 * kInstrSize);
Assembler::FlushICache(pc, 4 * kInstrSize);
}
}
}
EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
template <typename T>
bool UseScratchRegisterScope::CanAcquireVfp() const {
VfpRegList* available = assembler_->GetScratchVfpRegisterList();
DCHECK_NOT_NULL(available);
for (int index = 0; index < T::kNumRegisters; index++) {
T reg = T::from_code(index);
uint64_t mask = reg.ToVfpRegList();
if ((*available & mask) == mask) {
return true;
}
}
return false;
}
template <typename T>
T UseScratchRegisterScope::AcquireVfp() {
VfpRegList* available = assembler_->GetScratchVfpRegisterList();
DCHECK_NOT_NULL(available);
for (int index = 0; index < T::kNumRegisters; index++) {
T reg = T::from_code(index);
uint64_t mask = reg.ToVfpRegList();
if ((*available & mask) == mask) {
*available &= ~mask;
return reg;
}
}
UNREACHABLE();
}
} // namespace internal
} // namespace v8


@ -347,22 +347,20 @@ uint32_t RelocInfo::embedded_size() const {
Assembler::target_address_at(pc_, constant_pool_));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
flush_mode);
Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, constant_pool_,
void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}
void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
set_embedded_address(isolate, address, icache_flush_mode);
set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@ -566,10 +564,16 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
// it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make
// its use consistent with other features, we always enable it if we can.
EnableCpuFeature(VFP32DREGS);
// Make sure we pick two D registers which alias a Q register. This way, we
// can use a Q as a scratch if NEON is supported.
scratch_vfp_register_list_ = d14.ToVfpRegList() | d15.ToVfpRegList();
} else {
// When VFP32DREGS is not supported, d15 becomes allocatable. Therefore we
// cannot use it as a scratch.
scratch_vfp_register_list_ = d14.ToVfpRegList();
}
}
Assembler::~Assembler() {
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
@ -1214,6 +1218,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
DCHECK(x.IsImmediate());
// Upon failure to encode, the opcode should not have changed.
DCHECK(opcode == (instr & kOpCodeMask));
UseScratchRegisterScope temps(this);
Condition cond = Instruction::ConditionField(instr);
if ((opcode == MOV) && !set_flags) {
// Generate a sequence of mov instructions or a load from the constant
@ -1221,7 +1226,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
DCHECK(!rn.is_valid());
Move32BitImmediate(rd, x, cond);
} else if ((opcode == ADD) && !set_flags && (rd == rn) &&
(scratch_register_list_ == 0)) {
!temps.CanAcquire()) {
// Split the operation into a sequence of additions if we cannot use a
// scratch register. In this case, we cannot re-use rn and the assembler
// does not have any scratch registers to spare.
@ -1244,7 +1249,6 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to a scratch register and change the original instruction to
// use it.
UseScratchRegisterScope temps(this);
// Re-use the destination register if possible.
Register scratch =
(rd.is_valid() && rd != rn && rd != pc) ? rd : temps.Acquire();
@ -1501,6 +1505,10 @@ void Assembler::and_(Register dst, Register src1, const Operand& src2,
AddrMode1(cond | AND | s, dst, src1, src2);
}
void Assembler::and_(Register dst, Register src1, Register src2, SBit s,
Condition cond) {
and_(dst, src1, Operand(src2), s, cond);
}
void Assembler::eor(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
@ -2367,6 +2375,11 @@ void Assembler::isb(BarrierOption option) {
}
}
void Assembler::csdb() {
// Details available in Arm Cache Speculation Side-channels white paper,
// version 1.1, page 4.
emit(0xE320F014);
}
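A breakdown of the constant, assuming the standard ARM hint encoding:

  // 0xE320F014: cond = 0b1110 (AL); bits 24..8 = 0x120F0, the nop-like hint
  // space matched by IsNopLikeType1() later in this diff; hint number
  // 0x14 (20 decimal) selects CSDB, while hint 0 would be a plain NOP.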
// Coprocessor instructions.
void Assembler::cdp(Coprocessor coproc,
@ -5153,8 +5166,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value) {
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL &&
rmode != RelocInfo::NONE64);
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
(rmode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
@ -5474,24 +5486,24 @@ void PatchingAssembler::Emit(Address addr) {
emit(reinterpret_cast<Instr>(addr));
}
void PatchingAssembler::FlushICache(Isolate* isolate) {
Assembler::FlushICache(isolate, buffer_, buffer_size_ - kGap);
}
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
: available_(assembler->GetScratchRegisterList()),
old_available_(*available_) {}
: assembler_(assembler),
old_available_(*assembler->GetScratchRegisterList()),
old_available_vfp_(*assembler->GetScratchVfpRegisterList()) {}
UseScratchRegisterScope::~UseScratchRegisterScope() {
*available_ = old_available_;
*assembler_->GetScratchRegisterList() = old_available_;
*assembler_->GetScratchVfpRegisterList() = old_available_vfp_;
}
Register UseScratchRegisterScope::Acquire() {
DCHECK_NOT_NULL(available_);
DCHECK_NE(*available_, 0);
int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
*available_ &= ~(1UL << index);
return Register::from_code(index);
RegList* available = assembler_->GetScratchRegisterList();
DCHECK_NOT_NULL(available);
DCHECK_NE(*available, 0);
int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
Register reg = Register::from_code(index);
*available &= ~reg.bit();
return reg;
}
} // namespace internal


@ -56,8 +56,9 @@ namespace internal {
V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
V(r8) V(r9) V(r10) V(fp) V(ip) V(sp) V(lr) V(pc)
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) V(r8)
#define ALLOCATABLE_GENERAL_REGISTERS(V) \
V(r0) V(r1) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
V(r8) V(r9)
#define FLOAT_REGISTERS(V) \
V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) \
@ -184,6 +185,17 @@ enum SwVfpRegisterCode {
kSwVfpAfterLast
};
// Representation of a list of non-overlapping VFP registers. This list
// represents the data layout of VFP registers as a bitfield:
// S registers cover 1 bit
// D registers cover 2 bits
// Q registers cover 4 bits
//
// This way, we make sure no registers in the list ever overlap. However, a list
// may represent multiple different sets of registers,
// e.g. [d0 s2 s3] <=> [s0 s1 d1].
typedef uint64_t VfpRegList;
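A worked example of the encoding, following the ToVfpRegList() definitions added below:

  //   s3.ToVfpRegList() == uint64_t{0x1} << 3        -> 0b1000
  //   d1.ToVfpRegList() == uint64_t{0x3} << (1 * 2)  -> 0b1100  (= s2 + s3)
  //   q0.ToVfpRegList() == uint64_t{0xf} << (0 * 4)  -> 0b1111  (= s0..s3)
  // Hence [d0, s2, s3] and [s0, s1, d1] denote the same list, 0b1111.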
// Single word VFP register.
class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
public:
@ -195,6 +207,11 @@ class SwVfpRegister : public RegisterBase<SwVfpRegister, kSwVfpAfterLast> {
*vm = reg_code >> 1;
}
void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
VfpRegList ToVfpRegList() const {
DCHECK(is_valid());
// Each bit in the list corresponds to a S register.
return uint64_t{0x1} << code();
}
private:
friend class RegisterBase;
@ -217,10 +234,6 @@ enum DoubleRegisterCode {
// Double word VFP register.
class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
public:
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, that does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static constexpr int kSizeInBytes = 8;
inline static int NumRegisters();
@ -231,6 +244,11 @@ class DwVfpRegister : public RegisterBase<DwVfpRegister, kDoubleAfterLast> {
*vm = reg_code & 0x0F;
}
void split_code(int* vm, int* m) const { split_code(code(), vm, m); }
VfpRegList ToVfpRegList() const {
DCHECK(is_valid());
// A D register overlaps two S registers.
return uint64_t{0x3} << (code() * 2);
}
private:
friend class RegisterBase;
@ -255,6 +273,11 @@ class LowDwVfpRegister
SwVfpRegister high() const {
return SwVfpRegister::from_code(code() * 2 + 1);
}
VfpRegList ToVfpRegList() const {
DCHECK(is_valid());
// A D register overlaps two S registers.
return uint64_t{0x3} << (code() * 2);
}
private:
friend class RegisterBase;
@ -282,6 +305,11 @@ class QwNeonRegister : public RegisterBase<QwNeonRegister, kSimd128AfterLast> {
DwVfpRegister high() const {
return DwVfpRegister::from_code(code() * 2 + 1);
}
VfpRegList ToVfpRegList() const {
DCHECK(is_valid());
// A Q register overlaps four S registers.
return uint64_t{0xf} << (code() * 4);
}
private:
friend class RegisterBase;
@ -334,12 +362,6 @@ SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
constexpr LowDwVfpRegister kFirstCalleeSavedDoubleReg = d8;
constexpr LowDwVfpRegister kLastCalleeSavedDoubleReg = d15;
constexpr LowDwVfpRegister kDoubleRegZero = d13;
constexpr LowDwVfpRegister kScratchDoubleReg = d14;
// This scratch q-register aliases d14 (kScratchDoubleReg) and d15, but is only
// used if NEON is supported, which implies VFP32DREGS. When there are only 16
// d-registers, d15 is still allocatable.
constexpr QwNeonRegister kScratchQuadReg = q7;
constexpr LowDwVfpRegister kScratchDoubleReg2 = d15;
constexpr CRegister no_creg = CRegister::no_reg();
@ -376,7 +398,7 @@ class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(int32_t immediate,
RelocInfo::Mode rmode = RelocInfo::NONE32));
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(static Operand Zero());
INLINE(explicit Operand(const ExternalReference& f));
explicit Operand(Handle<HeapObject> handle);
@ -651,7 +673,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
@ -665,12 +687,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Isolate* isolate, Address constant_pool_entry, Code* code,
Address target);
Address constant_pool_entry, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target,
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// Here we are patching the address in the constant pool, not the actual call
@ -685,6 +706,9 @@ class Assembler : public AssemblerBase {
// register.
static constexpr int kPcLoadDelta = 8;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
VfpRegList* GetScratchVfpRegisterList() {
return &scratch_vfp_register_list_;
}
// ---------------------------------------------------------------------------
// Code generation
@ -717,6 +741,8 @@ class Assembler : public AssemblerBase {
void and_(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
void and_(Register dst, Register src1, Register src2, SBit s = LeaveCC,
Condition cond = al);
void eor(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@ -936,6 +962,9 @@ class Assembler : public AssemblerBase {
void dsb(BarrierOption option);
void isb(BarrierOption option);
// Conditional speculation barrier.
void csdb();
// Coprocessor instructions
void cdp(Coprocessor coproc, int opcode_1,
@ -1655,6 +1684,7 @@ class Assembler : public AssemblerBase {
// Scratch registers available for use by the Assembler.
RegList scratch_register_list_;
VfpRegList scratch_vfp_register_list_;
private:
// Avoid overflows for displacements etc.
@ -1732,6 +1762,7 @@ class Assembler : public AssemblerBase {
friend class BlockConstPoolScope;
friend class BlockCodeTargetSharingScope;
friend class EnsureSpace;
friend class UseScratchRegisterScope;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
@ -1747,8 +1778,6 @@ class Assembler : public AssemblerBase {
std::forward_list<HeapObjectRequest> heap_object_requests_;
};
constexpr int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
class EnsureSpace BASE_EMBEDDED {
public:
INLINE(explicit EnsureSpace(Assembler* assembler));
@ -1760,7 +1789,6 @@ class PatchingAssembler : public Assembler {
~PatchingAssembler();
void Emit(Address addr);
void FlushICache(Isolate* isolate);
};
// This scope utility allows scratch registers to be managed safely. The
@ -1779,12 +1807,38 @@ class UseScratchRegisterScope {
// Take a register from the list and return it.
Register Acquire();
SwVfpRegister AcquireS() { return AcquireVfp<SwVfpRegister>(); }
LowDwVfpRegister AcquireLowD() { return AcquireVfp<LowDwVfpRegister>(); }
DwVfpRegister AcquireD() {
DwVfpRegister reg = AcquireVfp<DwVfpRegister>();
DCHECK(assembler_->VfpRegisterIsAvailable(reg));
return reg;
}
QwNeonRegister AcquireQ() {
QwNeonRegister reg = AcquireVfp<QwNeonRegister>();
DCHECK(assembler_->VfpRegisterIsAvailable(reg));
return reg;
}
private:
// Currently available scratch registers.
RegList* available_;
friend class Assembler;
friend class TurboAssembler;
// Check if we have registers available to acquire.
// These methods are kept private intentionally to restrict their usage to the
// assemblers. Choosing to emit a different instruction sequence depending on
// the availability of scratch registers is generally their job.
bool CanAcquire() const { return *assembler_->GetScratchRegisterList() != 0; }
template <typename T>
bool CanAcquireVfp() const;
template <typename T>
T AcquireVfp();
Assembler* assembler_;
// Available scratch registers at the start of this scope.
RegList old_available_;
VfpRegList old_available_vfp_;
};
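A usage sketch from inside assembler code (mirroring the Swap() changes later in this diff); the destructor restores both lists:

  {
    UseScratchRegisterScope temps(this);         // 'this' is the Assembler
    Register scratch = temps.Acquire();          // core scratch register
    DwVfpRegister d_scratch = temps.AcquireD();  // VFP scratch
    // ... emit code using scratch and d_scratch ...
  }  // both the core and VFP scratch lists are restored here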
} // namespace internal


@ -46,7 +46,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
Register double_low = GetRegisterThatIsNotOneOf(result_reg);
Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
LowDwVfpRegister double_scratch = kScratchDoubleReg;
LowDwVfpRegister double_scratch = temps.AcquireLowD();
// Save the old values from these temporary registers on the stack.
__ Push(double_high, double_low);
@ -385,6 +385,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ cmp(cp, Operand(0));
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
// both configurations. It is safe to always do this, because the underlying
// register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ mov(r1, Operand(pending_handler_entrypoint_address));
@ -572,8 +578,8 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(tasm);
predictable.ExpectSize(tasm->CallStubSize() + 2 * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(
tasm, tasm->CallStubSize() + 2 * Assembler::kInstrSize);
tasm->push(lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->pop(lr);
@ -584,8 +590,8 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm);
predictable.ExpectSize(masm->CallStubSize() + 2 * Assembler::kInstrSize);
PredictableCodeSizeScope predictable(
masm, masm->CallStubSize() + 2 * Assembler::kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);


@ -166,9 +166,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
CodeDesc desc;
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
DCHECK(!RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, allocated);
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
@ -257,7 +257,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, allocated);
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
@ -282,9 +282,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
CodeDesc desc;
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
DCHECK(!RelocInfo::RequiresRelocation(desc));
Assembler::FlushICache(isolate, buffer, allocated);
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif


@ -641,8 +641,8 @@ class Instruction {
&& (Bit(20) == 0)
&& ((Bit(7) == 0)); }
// Test for a nop instruction, which falls under type 1.
inline bool IsNopType1() const { return Bits(24, 0) == 0x0120F000; }
// Test for nop-like instructions which fall under type 1.
inline bool IsNopLikeType1() const { return Bits(24, 8) == 0x120F0; }
// Test for a stop instruction.
inline bool IsStop() const {


@ -30,9 +30,6 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kFloatRegsSize = kFloatSize * SwVfpRegister::kNumRegisters;
// Save all allocatable VFP registers before messing with them.
DCHECK_EQ(kDoubleRegZero.code(), 13);
DCHECK_EQ(kScratchDoubleReg.code(), 14);
{
// We use a run-time check for VFP32DREGS.
CpuFeatureScope scope(masm(), VFP32DREGS,


@ -937,8 +937,14 @@ void Decoder::DecodeType01(Instruction* instr) {
} else {
Unknown(instr); // not used by V8
}
} else if ((type == 1) && instr->IsNopType1()) {
Format(instr, "nop'cond");
} else if ((type == 1) && instr->IsNopLikeType1()) {
if (instr->BitField(7, 0) == 0) {
Format(instr, "nop'cond");
} else if (instr->BitField(7, 0) == 20) {
Format(instr, "csdb");
} else {
Unknown(instr); // Not used in V8.
}
} else {
switch (instr->OpcodeField()) {
case AND: {


@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM_FRAMES_ARM_H_
#define V8_ARM_FRAMES_ARM_H_
#ifndef V8_ARM_FRAME_CONSTANTS_ARM_H_
#define V8_ARM_FRAME_CONSTANTS_ARM_H_
namespace v8 {
namespace internal {
@ -45,4 +45,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
#endif // V8_ARM_FRAMES_ARM_H_
#endif // V8_ARM_FRAME_CONSTANTS_ARM_H_


@ -70,12 +70,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }


@ -18,6 +18,7 @@
#include "src/double.h"
#include "src/external-reference-table.h"
#include "src/frames-inl.h"
#include "src/instruction-stream.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@ -241,22 +242,6 @@ void TurboAssembler::Ret(int drop, Condition cond) {
Ret(cond);
}
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch,
Condition cond) {
if (scratch == no_reg) {
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
} else {
mov(scratch, reg1, LeaveCC, cond);
mov(reg1, reg2, LeaveCC, cond);
mov(reg2, scratch, LeaveCC, cond);
}
}
void TurboAssembler::Call(Label* target) { bl(target); }
void TurboAssembler::Push(Handle<HeapObject> handle) {
@ -305,27 +290,34 @@ void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
}
}
void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
if (srcdst0 == srcdst1) return; // Swapping aliased registers emits nothing.
void TurboAssembler::Swap(Register srcdst0, Register srcdst1) {
DCHECK(srcdst0 != srcdst1);
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
mov(scratch, srcdst0);
mov(srcdst0, srcdst1);
mov(srcdst1, scratch);
}
void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
DCHECK(srcdst0 != srcdst1);
DCHECK(VfpRegisterIsAvailable(srcdst0));
DCHECK(VfpRegisterIsAvailable(srcdst1));
if (CpuFeatures::IsSupported(NEON)) {
vswp(srcdst0, srcdst1);
} else {
DCHECK_NE(srcdst0, kScratchDoubleReg);
DCHECK_NE(srcdst1, kScratchDoubleReg);
vmov(kScratchDoubleReg, srcdst0);
UseScratchRegisterScope temps(this);
DwVfpRegister scratch = temps.AcquireD();
vmov(scratch, srcdst0);
vmov(srcdst0, srcdst1);
vmov(srcdst1, kScratchDoubleReg);
vmov(srcdst1, scratch);
}
}
void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
if (srcdst0 != srcdst1) {
vswp(srcdst0, srcdst1);
}
DCHECK(srcdst0 != srcdst1);
vswp(srcdst0, srcdst1);
}
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
@ -817,11 +809,14 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
int dst_offset = dst_code & 1;
int src_offset = src_code & 1;
if (CpuFeatures::IsSupported(NEON)) {
UseScratchRegisterScope temps(this);
DwVfpRegister scratch = temps.AcquireD();
// On Neon we can shift and insert from d-registers.
if (src_offset == dst_offset) {
// Offsets are the same, use vdup to copy the source to the opposite lane.
vdup(Neon32, kScratchDoubleReg, src_d_reg, src_offset);
src_d_reg = kScratchDoubleReg;
vdup(Neon32, scratch, src_d_reg, src_offset);
// Here we are extending the lifetime of scratch.
src_d_reg = scratch;
src_offset = dst_offset ^ 1;
}
if (dst_offset) {
@ -842,27 +837,30 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) {
// Without Neon, use the scratch registers to move src and/or dst into
// s-registers.
int scratchSCode = kScratchDoubleReg.low().code();
int scratchSCode2 = kScratchDoubleReg2.low().code();
UseScratchRegisterScope temps(this);
LowDwVfpRegister d_scratch = temps.AcquireLowD();
LowDwVfpRegister d_scratch2 = temps.AcquireLowD();
int s_scratch_code = d_scratch.low().code();
int s_scratch_code2 = d_scratch2.low().code();
if (src_code < SwVfpRegister::kNumRegisters) {
// src is an s-register, dst is not.
vmov(kScratchDoubleReg, dst_d_reg);
vmov(SwVfpRegister::from_code(scratchSCode + dst_offset),
vmov(d_scratch, dst_d_reg);
vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
SwVfpRegister::from_code(src_code));
vmov(dst_d_reg, kScratchDoubleReg);
vmov(dst_d_reg, d_scratch);
} else if (dst_code < SwVfpRegister::kNumRegisters) {
// dst is an s-register, src is not.
vmov(kScratchDoubleReg, src_d_reg);
vmov(d_scratch, src_d_reg);
vmov(SwVfpRegister::from_code(dst_code),
SwVfpRegister::from_code(scratchSCode + src_offset));
SwVfpRegister::from_code(s_scratch_code + src_offset));
} else {
// Neither src nor dst is an s-register. Both scratch double registers are
// available when there are 32 VFP registers.
vmov(kScratchDoubleReg, src_d_reg);
vmov(kScratchDoubleReg2, dst_d_reg);
vmov(SwVfpRegister::from_code(scratchSCode + dst_offset),
SwVfpRegister::from_code(scratchSCode2 + src_offset));
vmov(dst_d_reg, kScratchQuadReg.high());
vmov(d_scratch, src_d_reg);
vmov(d_scratch2, dst_d_reg);
vmov(SwVfpRegister::from_code(s_scratch_code + dst_offset),
SwVfpRegister::from_code(s_scratch_code2 + src_offset));
vmov(dst_d_reg, d_scratch2);
}
}
@ -870,11 +868,13 @@ void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) {
if (dst_code < SwVfpRegister::kNumRegisters) {
vldr(SwVfpRegister::from_code(dst_code), src);
} else {
UseScratchRegisterScope temps(this);
LowDwVfpRegister scratch = temps.AcquireLowD();
// TODO(bbudge) If Neon supported, use load single lane form of vld1.
int dst_s_code = kScratchDoubleReg.low().code() + (dst_code & 1);
vmov(kScratchDoubleReg, DwVfpRegister::from_code(dst_code / 2));
int dst_s_code = scratch.low().code() + (dst_code & 1);
vmov(scratch, DwVfpRegister::from_code(dst_code / 2));
vldr(SwVfpRegister::from_code(dst_s_code), src);
vmov(DwVfpRegister::from_code(dst_code / 2), kScratchDoubleReg);
vmov(DwVfpRegister::from_code(dst_code / 2), scratch);
}
}
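Editor's note: the s-code arithmetic in VmovExtended relies on the ARM VFP register overlay: s(2n) and s(2n+1) are the low and high halves of d(n), and only d0-d15 have such views. A small sketch of the assumed mapping:

#include <cassert>

// VFP aliasing: d(n) = { s(2n) [low], s(2n+1) [high] } for n in [0, 15].
// d16-d31 have no single-precision views, which is why scratch reuse is
// restricted to LowDwVfpRegister above.
constexpr int LowSCode(int d_code) { return 2 * d_code; }
constexpr int HighSCode(int d_code) { return 2 * d_code + 1; }

int main() {
  assert(LowSCode(7) == 14);   // s14 = low half of d7
  assert(HighSCode(7) == 15);  // s15 = high half of d7
}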
@ -883,8 +883,10 @@ void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) {
vstr(SwVfpRegister::from_code(src_code), dst);
} else {
// TODO(bbudge) If Neon supported, use store single lane form of vst1.
int src_s_code = kScratchDoubleReg.low().code() + (src_code & 1);
vmov(kScratchDoubleReg, DwVfpRegister::from_code(src_code / 2));
UseScratchRegisterScope temps(this);
LowDwVfpRegister scratch = temps.AcquireLowD();
int src_s_code = scratch.low().code() + (src_code & 1);
vmov(scratch, DwVfpRegister::from_code(src_code / 2));
vstr(SwVfpRegister::from_code(src_s_code), dst);
}
}
@ -938,9 +940,11 @@ void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
void TurboAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
Register shift) {
DCHECK(!AreAliased(dst_high, src_low));
DCHECK(!AreAliased(dst_high, shift));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Label less_than_32;
Label done;
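Editor's note: LslPair implements a 64-bit left shift over a {low, high} pair of 32-bit registers; the two labels separate the shift >= 32 and shift < 32 cases. A plain C++ model of the intended result (a sketch of the semantics, not the emitted sequence; shift is assumed to be in [0, 63]):

#include <cassert>
#include <cstdint>

void LslPair(uint32_t& dst_low, uint32_t& dst_high,
             uint32_t src_low, uint32_t src_high, unsigned shift) {
  if (shift >= 32) {         // high takes everything, low is cleared
    dst_high = src_low << (shift - 32);
    dst_low = 0;
  } else if (shift == 0) {   // avoid the undefined 32-bit right shift below
    dst_high = src_high;
    dst_low = src_low;
  } else {                   // bits carry from low into high
    dst_high = (src_high << shift) | (src_low >> (32 - shift));
    dst_low = src_low << shift;
  }
}

int main() {
  uint32_t lo, hi;
  LslPair(lo, hi, 0x80000001u, 0u, 1);  // 0x0000000080000001 << 1
  assert(lo == 2u && hi == 1u);
}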
@ -984,9 +988,11 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_low, shift));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Label less_than_32;
Label done;
@ -1031,9 +1037,11 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
Register shift) {
DCHECK(!AreAliased(dst_low, src_high));
DCHECK(!AreAliased(dst_low, shift));
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Label less_than_32;
Label done;
@ -1362,13 +1370,30 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_hook;
Label skip_hook, call_hook;
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(isolate());
mov(r4, Operand(debug_is_active));
ldrsb(r4, MemOperand(r4));
cmp(r4, Operand(0));
b(eq, &skip_hook);
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
mov(r4, Operand(debug_hook_active));
ldrsb(r4, MemOperand(r4));
cmp(r4, Operand(0));
b(ne, &call_hook);
ldr(r4, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kDebugInfoOffset));
JumpIfSmi(r4, &skip_hook);
ldr(r4, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
tst(r4, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
b(eq, &skip_hook);
bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
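Editor's note: the new hook check above is a three-step decision. A C++ restatement (hypothetical parameter names; the real code reads these values through external references and the function's SharedFunctionInfo):

// Mirrors the branches above: skip_hook, call_hook, and the DebugInfo test.
bool ShouldCallDebugHook(bool debug_is_active, bool hook_on_function_call,
                         bool has_debug_info, int debug_info_flags,
                         int break_at_entry_flag) {
  if (!debug_is_active) return false;       // b(eq, &skip_hook)
  if (hook_on_function_call) return true;   // b(ne, &call_hook)
  if (!has_debug_info) return false;        // JumpIfSmi -> &skip_hook
  return (debug_info_flags & break_at_entry_flag) != 0;  // tst + b(eq, ...)
}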
@ -1426,7 +1451,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = r4;
Register code = kJavaScriptCallCodeStartRegister;
ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@ -1480,14 +1505,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(r1, no_reg, expected, actual, flag);
}
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
Move(r1, function);
InvokeFunction(r1, expected, actual, flag);
}
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
@ -1615,13 +1632,22 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
DwVfpRegister double_input,
Label* done) {
LowDwVfpRegister double_scratch = kScratchDoubleReg;
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
SwVfpRegister single_scratch = SwVfpRegister::no_reg();
if (temps.CanAcquireVfp<SwVfpRegister>()) {
single_scratch = temps.AcquireS();
} else {
// Reuse the input as a scratch register. This is only possible if the
// input register is d0-d15, since only those registers overlap the
// s-registers (there is no s32 or above).
DCHECK_LT(double_input.code(), LowDwVfpRegister::kNumRegisters);
LowDwVfpRegister double_scratch =
LowDwVfpRegister::from_code(double_input.code());
single_scratch = double_scratch.low();
}
vcvt_s32_f64(single_scratch, double_input);
vmov(result, single_scratch);
Register scratch = temps.Acquire();
// If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
sub(scratch, result, Operand(1));
cmp(scratch, Operand(0x7FFFFFFE));
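Editor's note: vcvt_s32_f64 saturates out-of-range inputs to 0x7FFFFFFF or 0x80000000, and the sub/cmp pair flags exactly those two results: result - 1 (with wraparound) is signed-greater-or-equal to 0x7FFFFFFE only for them. A checked sketch, assuming the subsequent branch (not shown in this hunk) uses a signed condition:

#include <cassert>
#include <cstdint>

bool MaybeSaturated(int32_t result) {
  // Wraparound subtraction done in unsigned arithmetic to avoid UB on
  // INT32_MIN; the comparison is then signed, as in the emitted cmp.
  int32_t minus_one = static_cast<int32_t>(static_cast<uint32_t>(result) - 1u);
  return minus_one >= 0x7FFFFFFE;
}

int main() {
  assert(MaybeSaturated(0x7FFFFFFF));       // INT32_MAX
  assert(MaybeSaturated(-0x7FFFFFFF - 1));  // INT32_MIN
  assert(!MaybeSaturated(0) && !MaybeSaturated(-1));
  assert(!MaybeSaturated(0x7FFFFFFE));
}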
@ -1704,6 +1730,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
int32_t bytes_address = reinterpret_cast<int32_t>(stream->bytes());
mov(kOffHeapTrampolineRegister, Operand(bytes_address, RelocInfo::NONE));
Jump(kOffHeapTrampolineRegister);
}
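Editor's note: the trampoline above is just "materialize the raw bytes address, then jump". In C++ terms (conceptual sketch only; on ARM the mov/Jump pair does this in two instructions):

#include <cstdint>

using Builtin = void (*)();

// Conceptual shape of the off-heap trampoline: treat the embedded
// instruction bytes as code and transfer control there.
void JumpToStream(const uint8_t* stream_bytes) {
  Builtin target =
      reinterpret_cast<Builtin>(reinterpret_cast<uintptr_t>(stream_bytes));
  target();  // A tail-jump in generated code; a plain call in this sketch.
}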
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK_GT(value, 0);
@ -2275,6 +2307,15 @@ bool AreAliased(Register reg1,
}
#endif
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// On ARM, reading pc yields the address of the current instruction plus 8
// (kPcLoadDelta), so pc - (pc_offset() + kPcLoadDelta) is the code start.
sub(dst, pc, Operand(pc_offset() + TurboAssembler::kPcLoadDelta));
}
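Editor's note, worked through: if the sub executes at byte offset pc_offset() into the code object, the pc read delivers code_start + pc_offset() + 8, so subtracting pc_offset() + kPcLoadDelta recovers code_start. A small check of that arithmetic:

#include <cassert>
#include <cstdint>

constexpr int kPcLoadDelta = 8;  // ARM: pc reads as current instruction + 8.

uintptr_t CodeStart(uintptr_t pc_read, int pc_offset) {
  return pc_read - (pc_offset + kPcLoadDelta);
}

int main() {
  uintptr_t code_start = 0x1000;
  int pc_offset = 0x40;  // the sub sits 0x40 bytes into the code object
  uintptr_t pc_read = code_start + pc_offset + kPcLoadDelta;
  assert(CodeStart(pc_read, pc_offset) == code_start);
}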
void TurboAssembler::ResetSpeculationPoisonRegister() {
mov(kSpeculationPoisonRegister, Operand(-1));
}
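Editor's note: setting the poison register to -1 makes it an all-ones mask. The mitigation idea, sketched in C++ (conceptual; the real masking happens on speculatively executed loads in generated code):

#include <cassert>
#include <cstdint>

// Loads are ANDed with a mask that is ~0 on the architectural path and 0
// on a misspeculated path, so a speculative load cannot leak its value.
uint32_t PoisonedLoad(const uint32_t* p, uint32_t poison) {
  return *p & poison;
}

int main() {
  uint32_t v = 0xCAFEBABE;
  assert(PoisonedLoad(&v, ~0u) == 0xCAFEBABE);  // normal path
  assert(PoisonedLoad(&v, 0u) == 0);            // poisoned path
}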
} // namespace internal
} // namespace v8


@ -20,12 +20,15 @@ constexpr Register kReturnRegister2 = r2;
constexpr Register kJSFunctionRegister = r1;
constexpr Register kContextRegister = r7;
constexpr Register kAllocateSizeRegister = r1;
constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
constexpr Register kInterpreterDispatchTableRegister = r8;
constexpr Register kJavaScriptCallArgCountRegister = r0;
constexpr Register kJavaScriptCallCodeStartRegister = r2;
constexpr Register kJavaScriptCallNewTargetRegister = r3;
constexpr Register kOffHeapTrampolineRegister = r4;
constexpr Register kRuntimeCallFunctionRegister = r1;
constexpr Register kRuntimeCallArgCountRegister = r0;
@ -305,15 +308,15 @@ class TurboAssembler : public Assembler {
inline bool AllowThisStubCall(CodeStub* stub);
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
Register src_high, Register shift);
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void LsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
Register src_high, Register shift);
void LsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
Register src_high, Register shift);
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
@ -481,7 +484,8 @@ class TurboAssembler : public Assembler {
void VmovExtended(int dst_code, const MemOperand& src);
void VmovExtended(const MemOperand& dst, int src_code);
// Register swap.
// Register swap. Note that the register operands should be distinct.
void Swap(Register srcdst0, Register srcdst1);
void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
@ -530,6 +534,12 @@ class TurboAssembler : public Assembler {
#endif
}
// Compute the start of the generated instruction stream from the current PC.
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
void ResetSpeculationPoisonRegister();
private:
bool has_frame_ = false;
Isolate* const isolate_;
@ -579,11 +589,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg,
Condition cond = al);
void Mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
void And(Register dst, Register src1, const Operand& src2,
@ -694,10 +699,6 @@ class MacroAssembler : public TurboAssembler {
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
// Frame restart support
void MaybeDropFrames();
@ -797,6 +798,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
void JumpToInstructionStream(const InstructionStream* stream);
// ---------------------------------------------------------------------------
// StatsCounter support


@ -547,8 +547,7 @@ void ArmDebugger::Debug() {
#undef XSTR
}
static bool ICacheMatch(void* one, void* two) {
bool Simulator::ICacheMatch(void* one, void* two) {
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
@ -645,11 +644,6 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
// Set up simulator support first. Some of this information is needed to
// set up the architecture state.
size_t stack_size = 1 * 1024 * 1024;  // Allocate 1MB for the stack.
@ -2314,8 +2308,15 @@ void Simulator::DecodeType01(Instruction* instr) {
PrintF("%08x\n", instr->InstructionBits());
UNIMPLEMENTED();
}
} else if ((type == 1) && instr->IsNopType1()) {
// NOP.
} else if ((type == 1) && instr->IsNopLikeType1()) {
if (instr->BitField(7, 0) == 0) {
// NOP.
} else if (instr->BitField(7, 0) == 20) {
// CSDB.
} else {
PrintF("%08x\n", instr->InstructionBits());
UNIMPLEMENTED();
}
} else {
int rd = instr->RdValue();
int rn = instr->RnValue();
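Editor's note on the NOP-like decode above: the type-1 hint space keys on bits [7:0], where 0 decodes as NOP and 20 as CSDB. For reference, the unconditional A32 encodings (assumed values, cf. the Linux kernel's .inst 0xe320f014 for CSDB):

#include <cassert>
#include <cstdint>

constexpr uint32_t kNopA32 = 0xE320F000;   // hint #0
constexpr uint32_t kCsdbA32 = 0xE320F014;  // hint #20

constexpr uint32_t HintField(uint32_t instr) { return instr & 0xFF; }

int main() {
  assert(HintField(kNopA32) == 0);    // taken as NOP above
  assert(HintField(kCsdbA32) == 20);  // taken as CSDB above
}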
@ -5640,7 +5641,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
CheckICache(isolate_->simulator_i_cache(), instr);
CheckICache(i_cache(), instr);
}
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
@ -5822,7 +5823,7 @@ intptr_t Simulator::CallImpl(byte* entry, int argument_count,
return get_register(r0);
}
int32_t Simulator::CallFPImpl(byte* entry, double d0, double d1) {
intptr_t Simulator::CallFPImpl(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);


@ -183,6 +183,7 @@ class Simulator : public SimulatorBase {
static void SetRedirectInstruction(Instruction* instruction);
// ICache checking.
static bool ICacheMatch(void* one, void* two);
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size);
@ -381,9 +382,6 @@ class Simulator : public SimulatorBase {
// Debugger input.
char* last_debugger_input_;
// Icache simulation
base::CustomMatcherHashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;


@ -13,8 +13,7 @@
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsOptimizer() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return true; }
@ -95,7 +94,7 @@ inline void CPURegList::Remove(int code) {
inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return csp;
return sp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kXRegSizeInBits);
@ -105,7 +104,7 @@ inline Register Register::XRegFromCode(unsigned code) {
inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return wcsp;
return wsp;
} else {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kWRegSizeInBits);
@ -198,9 +197,7 @@ inline VRegister CPURegister::Q() const {
template<typename T>
struct ImmediateInitializer {
static const bool kIsIntType = true;
static inline RelocInfo::Mode rmode_for(T) {
return sizeof(T) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
}
static inline RelocInfo::Mode rmode_for(T) { return RelocInfo::NONE; }
static inline int64_t immediate_for(T t) {
STATIC_ASSERT(sizeof(T) <= 8);
return t;
@ -211,9 +208,7 @@ struct ImmediateInitializer {
template<>
struct ImmediateInitializer<Smi*> {
static const bool kIsIntType = false;
static inline RelocInfo::Mode rmode_for(Smi* t) {
return RelocInfo::NONE64;
}
static inline RelocInfo::Mode rmode_for(Smi* t) { return RelocInfo::NONE; }
static inline int64_t immediate_for(Smi* t) {
return reinterpret_cast<int64_t>(t);
}
@ -581,26 +576,23 @@ Address Assembler::return_address_from_call_start(Address pc) {
}
}
void Assembler::deserialization_set_special_target_at(
Isolate* isolate, Address constant_pool_entry, Code* code, Address target) {
Address constant_pool_entry, Code* code, Address target) {
Memory::Address_at(constant_pool_entry) = target;
}
void Assembler::deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target, RelocInfo::Mode mode) {
Address pc, Address target, RelocInfo::Mode mode) {
Memory::Address_at(pc) = target;
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc,
Address constant_pool, Address target,
void Assembler::set_target_address_at(Address pc, Address constant_pool,
Address target,
ICacheFlushMode icache_flush_mode) {
Memory::Address_at(target_pointer_address_at(pc)) = target;
// Intuitively, we would think it is necessary to always flush the
// instruction cache after patching a target address in the code as follows:
// Assembler::FlushICache(isolate(), pc, sizeof(target));
// Assembler::FlushICache(pc, sizeof(target));
// However, on ARM, an instruction is actually patched in the case of
// embedded constants of the form:
// ldr ip, [pc, #...]
@ -647,7 +639,7 @@ void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@ -681,28 +673,28 @@ Address RelocInfo::target_runtime_entry(Assembler* origin) {
return target_address();
}
void RelocInfo::set_target_runtime_entry(Isolate* isolate, Address target,
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target) {
set_target_address(isolate, target, write_barrier_mode, icache_flush_mode);
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
}
void RelocInfo::WipeOut(Isolate* isolate) {
void RelocInfo::WipeOut() {
DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
Assembler::set_target_address_at(pc_, constant_pool_, nullptr);
}
}
template <typename ObjectVisitor>
void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);


@ -181,22 +181,20 @@ uint32_t RelocInfo::embedded_size() const {
return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}
void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
flush_mode);
Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
// No icache flushing needed, see comment in set_target_address_at.
}
void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
set_embedded_address(isolate, address, icache_flush_mode);
set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
@ -467,9 +465,6 @@ void ConstPool::Clear() {
bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
// Constant pool currently does not support 32-bit entries.
DCHECK(mode != RelocInfo::NONE32);
return RelocInfo::IsNone(mode) ||
(mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
}
@ -2994,6 +2989,8 @@ void Assembler::isb() {
Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
}
void Assembler::csdb() { hint(CSDB); }
void Assembler::fmov(const VRegister& vd, double imm) {
if (vd.IsScalar()) {
DCHECK(vd.Is1D());
@ -4745,6 +4742,9 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// Non-relocatable constants should not end up in the literal pool.
DCHECK(!RelocInfo::IsNone(rmode));
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, nullptr);
bool write_reloc_info = true;


@ -68,7 +68,6 @@ namespace internal {
// clang-format on
constexpr int kRegListSizeInBits = sizeof(RegList) * kBitsPerByte;
static const int kNoCodeAgeSequenceLength = 5 * kInstructionSize;
const int kNumRegs = kNumberOfRegisters;
// Registers x0-x17 are caller-saved.
@ -455,8 +454,8 @@ constexpr Register no_reg = NoReg;
GENERAL_REGISTER_CODE_LIST(DEFINE_REGISTERS)
#undef DEFINE_REGISTERS
DEFINE_REGISTER(Register, wcsp, kSPRegInternalCode, kWRegSizeInBits);
DEFINE_REGISTER(Register, csp, kSPRegInternalCode, kXRegSizeInBits);
DEFINE_REGISTER(Register, wsp, kSPRegInternalCode, kWRegSizeInBits);
DEFINE_REGISTER(Register, sp, kSPRegInternalCode, kXRegSizeInBits);
#define DEFINE_VREGISTERS(N) \
DEFINE_REGISTER(VRegister, b##N, N, kBRegSizeInBits); \
@ -994,7 +993,7 @@ class Assembler : public AssemblerBase {
// The isolate argument is unused (and may be nullptr) when skipping flushing.
inline static Address target_address_at(Address pc, Address constant_pool);
inline static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address of
@ -1008,12 +1007,11 @@ class Assembler : public AssemblerBase {
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
inline static void deserialization_set_special_target_at(
Isolate* isolate, Address constant_pool_entry, Code* code,
Address target);
Address constant_pool_entry, Code* code, Address target);
// This sets the internal reference at the pc.
inline static void deserialization_set_target_internal_reference_at(
Isolate* isolate, Address pc, Address target,
Address pc, Address target,
RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
// All addresses in the constant pool are the same size as pointers.
@ -1754,6 +1752,9 @@ class Assembler : public AssemblerBase {
// Instruction synchronization barrier
void isb();
// Conditional speculation barrier.
void csdb();
// Alias for system instructions.
void nop() { hint(NOP); }
@ -3677,18 +3678,9 @@ class PatchingAssembler : public Assembler {
// If more or fewer instructions than expected are generated or if some
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
// This version will flush at destruction.
PatchingAssembler(Isolate* isolate, byte* start, unsigned count)
: PatchingAssembler(IsolateData(isolate), start, count) {
CHECK_NOT_NULL(isolate);
isolate_ = isolate;
}
// This version will not flush.
// Note that the instruction cache will not be flushed.
PatchingAssembler(IsolateData isolate_data, byte* start, unsigned count)
: Assembler(isolate_data, start, count * kInstructionSize + kGap),
isolate_(nullptr) {
: Assembler(isolate_data, start, count * kInstructionSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}
@ -3701,18 +3693,12 @@ class PatchingAssembler : public Assembler {
DCHECK((pc_offset() + kGap) == buffer_size_);
// Verify no relocation information has been emitted.
DCHECK(IsConstPoolEmpty());
// Flush the Instruction cache.
size_t length = buffer_size_ - kGap;
if (isolate_ != nullptr) Assembler::FlushICache(isolate_, buffer_, length);
}
// See definition of PatchAdrFar() for details.
static constexpr int kAdrFarPatchableNNops = 2;
static constexpr int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(int64_t target_offset);
private:
Isolate* isolate_;
};


@ -30,7 +30,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
__ Str(x1, MemOperand(__ StackPointer(), x5));
__ Poke(x1, Operand(x5));
__ Push(x1, x2);
__ Add(x0, x0, Operand(3));
__ TailCallRuntime(Runtime::kNewArray);
@ -314,7 +314,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ EnterExitFrame(
save_doubles(), x10, extra_stack_space,
is_builtin_exit() ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
DCHECK(csp.Is(__ StackPointer()));
// Poke callee-saved registers into reserved space.
__ Poke(argv, 1 * kPointerSize);
@ -349,12 +348,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: Space reserved for SPOffset.
// fp[-16]: CodeObject()
// csp[...]: Saved doubles, if saved_doubles is true.
// csp[32]: Alignment padding, if necessary.
// csp[24]: Preserved x23 (used for target).
// csp[16]: Preserved x22 (used for argc).
// csp[8]: Preserved x21 (used for argv).
// csp -> csp[0]: Space reserved for the return address.
// sp[...]: Saved doubles, if saved_doubles is true.
// sp[32]: Alignment padding, if necessary.
// sp[24]: Preserved x23 (used for target).
// sp[16]: Preserved x22 (used for argc).
// sp[8]: Preserved x21 (used for argv).
// sp -> sp[0]: Space reserved for the return address.
//
// After a successful call, the exit frame, preserved registers (x21-x23) and
// the arguments (including the receiver) are dropped or popped as
@ -364,8 +363,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// untouched, and the stub throws an exception by jumping to the
// exception_returned label.
DCHECK(csp.Is(__ StackPointer()));
// Prepare AAPCS64 arguments to pass to the builtin.
__ Mov(x0, argc);
__ Mov(x1, argv);
@ -437,7 +434,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// contain the current pending exception, don't clobber it.
ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler,
isolate());
DCHECK(csp.Is(masm->StackPointer()));
{
FrameScope scope(masm, StackFrame::MANUAL);
__ Mov(x0, 0); // argc.
@ -454,7 +450,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
Register scratch = temps.AcquireX();
__ Mov(scratch, Operand(pending_handler_sp_address));
__ Ldr(scratch, MemOperand(scratch));
__ Mov(csp, scratch);
__ Mov(sp, scratch);
}
__ Mov(fp, Operand(pending_handler_fp_address));
__ Ldr(fp, MemOperand(fp));
@ -466,6 +462,12 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&not_js_frame);
// Reset the masking register. This is done independent of the underlying
// feature flag {FLAG_branch_load_poisoning} to make the snapshot work with
// both configurations. It is safe to always do this, because the underlying
// register is caller-saved and can be arbitrarily clobbered.
__ ResetSpeculationPoisonRegister();
// Compute the handler entry address and jump to it.
__ Mov(x10, Operand(pending_handler_entrypoint_address));
__ Ldr(x10, MemOperand(x10));
@ -511,7 +513,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Push(x13, x12, xzr, x10);
// Set up fp.
__ Sub(fp, __ StackPointer(), EntryFrameConstants::kCallerFPOffset);
__ Sub(fp, sp, EntryFrameConstants::kCallerFPOffset);
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
@ -582,7 +584,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.AcquireX();
__ Mov(scratch, __ StackPointer());
__ Mov(scratch, sp);
__ Str(scratch, MemOperand(x11));
}
@ -740,10 +742,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
// Make sure the caller configured the stack pointer (see comment in
// DirectCEntryStub::Generate).
DCHECK(csp.Is(__ StackPointer()));
intptr_t code =
reinterpret_cast<intptr_t>(GetCode().location());
__ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
@ -1260,7 +1258,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Prepare arguments.
Register args = x6;
__ Mov(args, masm->StackPointer());
__ Mov(args, sp);
// Allocate the v8::Arguments structure in the arguments' space, since it's
// not controlled by GC.
@ -1344,7 +1342,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
"slots must be a multiple of 2 for stack pointer alignment");
// Load address of v8::PropertyAccessorInfo::args_ array and name handle.
__ Mov(x0, masm->StackPointer()); // x0 = Handle<Name>
__ Mov(x0, sp); // x0 = Handle<Name>
__ Add(x1, x0, 1 * kPointerSize); // x1 = v8::PCI::args_
const int kApiStackSpace = 1;


@ -407,12 +407,13 @@ enum Extend {
};
enum SystemHint {
NOP = 0,
NOP = 0,
YIELD = 1,
WFE = 2,
WFI = 3,
SEV = 4,
SEVL = 5
WFE = 2,
WFI = 3,
SEV = 4,
SEVL = 5,
CSDB = 20
};
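Editor's note: CSDB joins the hint space at immediate 20. On A64, HINT #imm places the immediate in bits [11:5] of the fixed pattern 0xD503201F (HINT #0, i.e. NOP); a quick check against the CSDB encoding commonly quoted as 0xD503229F (assumed values):

#include <cassert>
#include <cstdint>

constexpr uint32_t HintInstr(uint32_t imm7) {
  return 0xD503201Fu | (imm7 << 5);  // imm7 lands in bits [11:5]
}

int main() {
  assert(HintInstr(0) == 0xD503201Fu);   // nop
  assert(HintInstr(20) == 0xD503229Fu);  // csdb
}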
enum BarrierDomain {


@ -168,11 +168,6 @@ void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
(instr->Mask(0x0039E000) == 0x00002000) ||
(instr->Mask(0x003AE000) == 0x00002000) ||
(instr->Mask(0x003CE000) == 0x00042000) ||
(instr->Mask(0x003FFFC0) == 0x000320C0) ||
(instr->Mask(0x003FF100) == 0x00032100) ||
(instr->Mask(0x003FF200) == 0x00032200) ||
(instr->Mask(0x003FF400) == 0x00032400) ||
(instr->Mask(0x003FF800) == 0x00032800) ||
(instr->Mask(0x0038F000) == 0x00005000) ||
(instr->Mask(0x0038E000) == 0x00006000)) {
V::VisitUnallocated(instr);
@ -467,6 +462,7 @@ void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
}
break;
}
V8_FALLTHROUGH;
}
case 1:
case 3:


@ -33,7 +33,7 @@ void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
masm->Add(src, masm->StackPointer(), src_offset);
masm->Add(src, sp, src_offset);
masm->Add(dst, dst, dst_offset);
// Write reg_list into the frame pointed to by dst.
@ -140,8 +140,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, adding two words for alignment padding and
// bailout id.
__ Add(fp_to_sp, __ StackPointer(),
kSavedRegistersAreaSize + (2 * kPointerSize));
__ Add(fp_to_sp, sp, kSavedRegistersAreaSize + (2 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
@ -222,7 +221,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
UseScratchRegisterScope temps(masm());
Register scratch = temps.AcquireX();
__ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
__ Mov(__ StackPointer(), scratch);
__ Mov(sp, scratch);
}
// Replace the current (input) frame with the output frames.


@ -968,7 +968,7 @@ void DisassemblingDecoder::VisitFPCompare(Instruction* instr) {
switch (instr->Mask(FPCompareMask)) {
case FCMP_s_zero:
case FCMP_d_zero: form = form_zero; // Fall through.
case FCMP_d_zero: form = form_zero; V8_FALLTHROUGH;
case FCMP_s:
case FCMP_d: mnemonic = "fcmp"; break;
default: form = "(FPCompare)";
@ -1246,6 +1246,11 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
form = nullptr;
break;
}
case CSDB: {
mnemonic = "csdb";
form = nullptr;
break;
}
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
switch (instr->Mask(MemBarrierMask)) {
@ -3327,7 +3332,7 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
}
}
if (reg.IsVRegister() || !(reg.Aliases(csp) || reg.Aliases(xzr))) {
if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) {
// Filter special registers
if (reg.IsX() && (reg.code() == 27)) {
AppendToOutput("cp");
@ -3339,9 +3344,9 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
// A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31.
AppendToOutput("%c%d", reg_char, reg.code());
}
} else if (reg.Aliases(csp)) {
// Disassemble w31/x31 as stack pointer wcsp/csp.
AppendToOutput("%s", reg.Is64Bits() ? "csp" : "wcsp");
} else if (reg.Aliases(sp)) {
// Disassemble w31/x31 as stack pointer wsp/sp.
AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp");
} else {
// Disassemble w31/x31 as zero register wzr/xzr.
AppendToOutput("%czr", reg_char);
@ -3713,6 +3718,8 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
}
return 0;
}
UNIMPLEMENTED();
return 0;
}
case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
@ -3836,7 +3843,8 @@ int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
switch (format[1]) {
case 'D': { // NDP.
DCHECK(instr->ShiftDP() != ROR);
} // Fall through.
V8_FALLTHROUGH;
}
case 'L': { // NLo.
if (instr->ImmDPShift() != 0) {
const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};


@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
#ifndef V8_ARM64_DISASM_ARM64_H_
#define V8_ARM64_DISASM_ARM64_H_
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
@ -96,4 +96,4 @@ class PrintDisassembler : public DisassemblingDecoder {
} // namespace internal
} // namespace v8
#endif // V8_ARM64_DISASM_ARM64_H
#endif // V8_ARM64_DISASM_ARM64_H_


@ -11,7 +11,7 @@ namespace internal {
static const int kX0DwarfCode = 0;
static const int kFpDwarfCode = 29;
static const int kLrDwarfCode = 30;
static const int kCSpDwarfCode = 31;
static const int kSpDwarfCode = 31;
const int EhFrameConstants::kCodeAlignmentFactor = 4;
const int EhFrameConstants::kDataAlignmentFactor = -8;
@ -33,7 +33,7 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
case kRegCode_x30:
return kLrDwarfCode;
case kSPRegInternalCode:
return kCSpDwarfCode;
return kSpDwarfCode;
case kRegCode_x0:
return kX0DwarfCode;
default:
@ -51,8 +51,8 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "fp";
case kLrDwarfCode:
return "lr";
case kCSpDwarfCode:
return "csp"; // This could be zr as well
case kSpDwarfCode:
return "sp"; // This could be zr as well
default:
UNIMPLEMENTED();
return nullptr;


@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_FRAMES_ARM64_H_
#define V8_ARM64_FRAMES_ARM64_H_
#ifndef V8_ARM64_FRAME_CONSTANTS_ARM64_H_
#define V8_ARM64_FRAME_CONSTANTS_ARM64_H_
namespace v8 {
namespace internal {
@ -61,4 +61,4 @@ class JavaScriptFrameConstants : public AllStatic {
} // namespace internal
} // namespace v8
#endif // V8_ARM64_FRAMES_ARM64_H_
#endif // V8_ARM64_FRAME_CONSTANTS_ARM64_H_


@ -258,7 +258,7 @@ class Instruction {
// Indicate whether Rd can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rd field.
Reg31Mode RdMode() const {
// The following instructions use csp or wsp as Rd:
// The following instructions use sp or wsp as Rd:
// Add/sub (immediate) when not setting the flags.
// Add/sub (extended) when not setting the flags.
// Logical (immediate) when not setting the flags.
@ -272,7 +272,7 @@ class Instruction {
}
if (IsLogicalImmediate()) {
// Of the logical (immediate) instructions, only ANDS (and its aliases)
// can set the flags. The others can all write into csp.
// can set the flags. The others can all write into sp.
// Note that some logical operations are not available to
// immediate-operand instructions, so we have to combine two masks here.
if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
@ -287,7 +287,7 @@ class Instruction {
// Indicate whether Rn can be the stack pointer or the zero register. This
// does not check that the instruction actually has an Rn field.
Reg31Mode RnMode() const {
// The following instructions use csp or wsp as Rn:
// The following instructions use sp or wsp as Rn:
// All loads and stores.
// Add/sub (immediate).
// Add/sub (extended).


@ -91,7 +91,6 @@ static const CounterDescriptor kCounterList[] = {
{"PC Addressing", Gauge},
{"Other", Gauge},
{"SP Adjust", Gauge},
};
Instrument::Instrument(const char* datafile, uint64_t sample_period)
@ -238,16 +237,8 @@ void Instrument::VisitPCRelAddressing(Instruction* instr) {
void Instrument::VisitAddSubImmediate(Instruction* instr) {
Update();
static Counter* sp_counter = GetCounter("SP Adjust");
static Counter* add_sub_counter = GetCounter("Add/Sub DP");
if (((instr->Mask(AddSubOpMask) == SUB) ||
(instr->Mask(AddSubOpMask) == ADD)) &&
(instr->Rd() == 31) && (instr->Rn() == 31)) {
// Count adjustments to the C stack pointer caused by V8 needing two SPs.
sp_counter->Increment();
} else {
add_sub_counter->Increment();
}
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}
@ -470,16 +461,8 @@ void Instrument::VisitAddSubShifted(Instruction* instr) {
void Instrument::VisitAddSubExtended(Instruction* instr) {
Update();
static Counter* sp_counter = GetCounter("SP Adjust");
static Counter* add_sub_counter = GetCounter("Add/Sub DP");
if (((instr->Mask(AddSubOpMask) == SUB) ||
(instr->Mask(AddSubOpMask) == ADD)) &&
(instr->Rd() == 31) && (instr->Rn() == 31)) {
// Count adjustments to the C stack pointer caused by V8 needing two SPs.
sp_counter->Increment();
} else {
add_sub_counter->Increment();
}
static Counter* counter = GetCounter("Add/Sub DP");
counter->Increment();
}


@ -69,15 +69,6 @@ const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function info
// x2: feedback vector
// x3: slot
Register registers[] = {x1, x2, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }


@ -404,8 +404,7 @@ void MacroAssembler::CzeroX(const Register& rd,
// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
void MacroAssembler::CmovX(const Register& rd,
const Register& rn,
void TurboAssembler::CmovX(const Register& rd, const Register& rn,
Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsSP());
@ -416,6 +415,11 @@ void MacroAssembler::CmovX(const Register& rd,
}
}
void TurboAssembler::Csdb() {
DCHECK(allow_macro_instructions());
csdb();
}
void TurboAssembler::Cset(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
@ -423,8 +427,7 @@ void TurboAssembler::Cset(const Register& rd, Condition cond) {
cset(rd, cond);
}
void MacroAssembler::Csetm(const Register& rd, Condition cond) {
void TurboAssembler::Csetm(const Register& rd, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@ -461,14 +464,12 @@ void MacroAssembler::Csneg(const Register& rd,
csneg(rd, rn, rm, cond);
}
void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
void TurboAssembler::Dmb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dmb(domain, type);
}
void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
void TurboAssembler::Dsb(BarrierDomain domain, BarrierType type) {
DCHECK(allow_macro_instructions());
dsb(domain, type);
}
@ -651,10 +652,12 @@ void TurboAssembler::Fmov(VRegister vd, double imm) {
if (bits == 0) {
fmov(vd, xzr);
} else {
Ldr(vd, imm);
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Mov(tmp, bits);
fmov(vd, tmp);
}
} else {
// TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
}
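Editor's note: without the load-literal path, Fmov now moves the double's raw bit pattern through an integer register (or uses xzr when the pattern is all zeroes). The bit-level view of that move:

#include <cassert>
#include <cstdint>
#include <cstring>

uint64_t DoubleBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // bit_cast<uint64_t>(d)
  return bits;
}

int main() {
  assert(DoubleBits(0.0) == 0);  // the fmov-from-xzr fast path
  assert(DoubleBits(1.0) == 0x3FF0000000000000ull);
}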
@ -678,12 +681,10 @@ void TurboAssembler::Fmov(VRegister vd, float imm) {
} else {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireW();
// TODO(all): Use Assembler::ldr(const VRegister& ft, float imm).
Mov(tmp, bit_cast<uint32_t>(imm));
Fmov(vd, tmp);
}
} else {
// TODO(all): consider NEON support for load literal.
Movi(vd, bits);
}
}
@ -737,8 +738,7 @@ void MacroAssembler::Hlt(int code) {
hlt(code);
}
void MacroAssembler::Isb() {
void TurboAssembler::Isb() {
DCHECK(allow_macro_instructions());
isb();
}
@ -748,12 +748,6 @@ void TurboAssembler::Ldr(const CPURegister& rt, const Operand& operand) {
ldr(rt, operand);
}
void TurboAssembler::Ldr(const CPURegister& rt, double imm) {
DCHECK(allow_macro_instructions());
DCHECK(rt.Is64Bits());
ldr(rt, Immediate(bit_cast<uint64_t>(imm)));
}
void TurboAssembler::Lsl(const Register& rd, const Register& rn,
unsigned shift) {
DCHECK(allow_macro_instructions());
@ -1042,58 +1036,6 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
uxtw(rd, rn);
}
void MacroAssembler::AlignAndSetCSPForFrame() {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
DCHECK_GE(sp_alignment, 16);
DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
}
void TurboAssembler::BumpSystemStackPointer(const Operand& space) {
DCHECK(!csp.Is(StackPointer()));
if (!TmpList()->IsEmpty()) {
Sub(csp, StackPointer(), space);
} else {
// TODO(jbramley): Several callers rely on this not using scratch
// registers, so we use the assembler directly here. However, this means
// that large immediate values of 'space' cannot be handled cleanly. (Only
// 24-bits immediates or values of 'space' that can be encoded in one
// instruction are accepted.) Once we implement our flexible scratch
// register idea, we could greatly simplify this function.
InstructionAccurateScope scope(this);
DCHECK(space.IsImmediate());
// Align to 16 bytes.
uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
DCHECK(is_uint24(imm));
Register source = StackPointer();
if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
bic(csp, source, 0xf);
source = csp;
}
if (!is_uint12(imm)) {
int64_t imm_top_12_bits = imm >> 12;
sub(csp, source, imm_top_12_bits << 12);
source = csp;
imm -= imm_top_12_bits << 12;
}
if (imm > 0) {
sub(csp, source, imm);
}
}
AssertStackConsistency();
}
void TurboAssembler::SyncSystemStackPointer() {
DCHECK(emit_debug_code());
DCHECK(!csp.Is(StackPointer()));
{ InstructionAccurateScope scope(this);
mov(csp, StackPointer());
}
AssertStackConsistency();
}
void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
@ -1249,14 +1191,9 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
if (size == 0) {
return;
}
DCHECK_EQ(size % 16, 0);
if (csp.Is(StackPointer())) {
DCHECK_EQ(size % 16, 0);
} else {
BumpSystemStackPointer(size);
}
Sub(StackPointer(), StackPointer(), size);
Sub(sp, sp, size);
}
void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
@ -1269,13 +1206,9 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) {
if (size.IsZero()) {
return;
}
AssertPositiveOrZero(count);
if (!csp.Is(StackPointer())) {
BumpSystemStackPointer(size);
}
Sub(StackPointer(), StackPointer(), size);
Sub(sp, sp, size);
}
@ -1290,11 +1223,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
if (!csp.Is(StackPointer())) {
BumpSystemStackPointer(size);
}
Sub(StackPointer(), StackPointer(), size);
Sub(sp, sp, size);
}
void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
@ -1305,16 +1234,8 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
return;
}
Add(StackPointer(), StackPointer(), size);
if (csp.Is(StackPointer())) {
DCHECK_EQ(size % 16, 0);
} else if (emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
}
Add(sp, sp, size);
DCHECK_EQ(size % 16, 0);
}
void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
@ -1329,14 +1250,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
}
AssertPositiveOrZero(count);
Add(StackPointer(), StackPointer(), size);
if (!csp.Is(StackPointer()) && emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
}
Add(sp, sp, size);
}
void TurboAssembler::DropArguments(const Register& count,
@ -1378,14 +1292,7 @@ void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
return;
}
Add(StackPointer(), StackPointer(), size);
if (!csp.Is(StackPointer()) && emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
}
Add(sp, sp, size);
}


@ -15,6 +15,7 @@
#include "src/frame-constants.h"
#include "src/frames-inl.h"
#include "src/heap/heap-inl.h"
#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@ -188,15 +189,14 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
// If the left-hand input is the stack pointer, we can't pre-shift the
// immediate, as the encoding won't allow the subsequent post shift.
PreShiftImmMode mode = rn.Is(csp) ? kNoShift : kAnyShift;
PreShiftImmMode mode = rn.Is(sp) ? kNoShift : kAnyShift;
Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
if (rd.Is(csp)) {
if (rd.IsSP()) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
Logical(temp, rn, imm_operand, op);
Mov(csp, temp);
AssertStackConsistency();
Mov(sp, temp);
} else {
Logical(rd, rn, imm_operand, op);
}
@ -294,7 +294,6 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// pointer.
if (rd.IsSP()) {
mov(rd, temp);
AssertStackConsistency();
}
}
}
@ -337,7 +336,7 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
// registers is not required to clear the top word of the X register. In
// this case, the instruction is discarded.
//
// If csp is an operand, add #0 is emitted, otherwise, orr #0.
// If sp is an operand, add #0 is emitted, otherwise, orr #0.
if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
(discard_mode == kDontDiscardForSameWReg))) {
Assembler::mov(rd, operand.reg());
@ -596,11 +595,8 @@ void TurboAssembler::ConditionalCompareMacro(const Register& rn,
}
}
void MacroAssembler::Csel(const Register& rd,
const Register& rn,
const Operand& operand,
Condition cond) {
void TurboAssembler::Csel(const Register& rd, const Register& rn,
const Operand& operand, Condition cond) {
DCHECK(allow_macro_instructions());
DCHECK(!rd.IsZero());
DCHECK((cond != al) && (cond != nv));
@ -724,11 +720,11 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn,
// If the destination or source register is the stack pointer, we can
// only pre-shift the immediate right by values supported in the add/sub
// extend encoding.
if (rd.Is(csp)) {
if (rd.Is(sp)) {
// If the destination is SP and flags will be set, we can't pre-shift
// the immediate at all.
mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
} else if (rn.Is(csp)) {
} else if (rn.Is(sp)) {
mode = kLimitShiftForSP;
}
@ -1105,9 +1101,9 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) {
PushPreamble(size);
// Reserve room for src0 and push src1.
str(src1, MemOperand(StackPointer(), -size, PreIndex));
str(src1, MemOperand(sp, -size, PreIndex));
// Fill the gap with src0.
str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
str(src0, MemOperand(sp, src1.SizeInBytes()));
}
@ -1166,9 +1162,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
PushPreamble(registers.Count(), size);
// Push up to four registers at a time because if the current stack pointer is
// csp and reg_size is 32, registers must be pushed in blocks of four in order
// to maintain the 16-byte alignment for csp.
// Push up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& src0 = registers.PopHighestIndex();
@ -1183,9 +1177,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) {
void TurboAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
// Pop up to four registers at a time because if the current stack pointer is
// csp and reg_size is 32, registers must be pushed in blocks of four in
// order to maintain the 16-byte alignment for csp.
// Pop up to four registers at a time.
while (!registers.IsEmpty()) {
int count_before = registers.Count();
const CPURegister& dst0 = registers.PopLowestIndex();
@ -1258,23 +1250,23 @@ void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0,
switch (count) {
case 1:
DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
str(src0, MemOperand(sp, -1 * size, PreIndex));
break;
case 2:
DCHECK(src2.IsNone() && src3.IsNone());
stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
stp(src1, src0, MemOperand(sp, -2 * size, PreIndex));
break;
case 3:
DCHECK(src3.IsNone());
stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
str(src0, MemOperand(StackPointer(), 2 * size));
stp(src2, src1, MemOperand(sp, -3 * size, PreIndex));
str(src0, MemOperand(sp, 2 * size));
break;
case 4:
// Skip over 4 * size, then fill in the gap. This allows four W registers
// to be pushed using csp, whilst maintaining 16-byte alignment for csp
// to be pushed using sp, whilst maintaining 16-byte alignment for sp
// at all times.
stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
stp(src1, src0, MemOperand(StackPointer(), 2 * size));
stp(src3, src2, MemOperand(sp, -4 * size, PreIndex));
stp(src1, src0, MemOperand(sp, 2 * size));
break;
default:
UNREACHABLE();
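Editor's note: the four-at-a-time case exists because sp must stay 16-byte aligned at every instruction. Four W registers are exactly 16 bytes, so a single pre-indexed stp keeps sp aligned while the second stp fills the gap without moving sp. The arithmetic, checked:

#include <cassert>

int main() {
  const int kWRegSize = 4;  // bytes
  int sp = 0x1000;          // 16-byte aligned on entry
  sp -= 4 * kWRegSize;      // stp src3, src2, [sp, #-16]!
  assert(sp % 16 == 0);     // aligned after the only sp update
  // stp src1, src0, [sp, #8] fills the gap without moving sp.
}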
@ -1295,24 +1287,24 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
switch (count) {
case 1:
DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
ldr(dst0, MemOperand(sp, 1 * size, PostIndex));
break;
case 2:
DCHECK(dst2.IsNone() && dst3.IsNone());
ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
ldp(dst0, dst1, MemOperand(sp, 2 * size, PostIndex));
break;
case 3:
DCHECK(dst3.IsNone());
ldr(dst2, MemOperand(StackPointer(), 2 * size));
ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
ldr(dst2, MemOperand(sp, 2 * size));
ldp(dst0, dst1, MemOperand(sp, 3 * size, PostIndex));
break;
case 4:
// Load the higher addresses first, then load the lower addresses and
// skip the whole block in the second instruction. This allows four W
// registers to be popped using csp, whilst maintaining 16-byte alignment
// for csp at all times.
ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
// registers to be popped using sp, whilst maintaining 16-byte alignment
// for sp at all times.
ldp(dst2, dst3, MemOperand(sp, 2 * size));
ldp(dst0, dst1, MemOperand(sp, 4 * size, PostIndex));
break;
default:
UNREACHABLE();
@ -1322,43 +1314,27 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0,
void TurboAssembler::PushPreamble(Operand total_size) {
if (total_size.IsZero()) return;
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
} else {
// Even if the current stack pointer is not the system stack pointer (csp),
// the system stack pointer will still be modified in order to comply with
// ABI rules about accessing memory below the system stack pointer.
BumpSystemStackPointer(total_size);
// The stack pointer must be aligned to 16 bytes on entry, and the total
// size of the specified registers must also be a multiple of 16 bytes.
if (total_size.IsImmediate()) {
DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PopPostamble(Operand total_size) {
if (total_size.IsZero()) return;
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
} else if (emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
// accesses in the now-free part of the stack.
SyncSystemStackPointer();
// The stack pointer must be aligned to 16 bytes on entry, and the total
// size of the specified registers must also be a multiple of 16 bytes.
if (total_size.IsImmediate()) {
DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
}
void TurboAssembler::PushPreamble(int count, int size) {
@ -1376,7 +1352,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
Str(src, MemOperand(StackPointer(), offset));
Str(src, MemOperand(sp, offset));
}
@ -1388,14 +1364,14 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
Check(le, AbortReason::kStackAccessBelowStackPointer);
}
Ldr(dst, MemOperand(StackPointer(), offset));
Ldr(dst, MemOperand(sp, offset));
}
void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2,
int offset) {
DCHECK(AreSameSizeAndType(src1, src2));
DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
Stp(src1, src2, MemOperand(StackPointer(), offset));
Stp(src1, src2, MemOperand(sp, offset));
}
@ -1404,7 +1380,7 @@ void MacroAssembler::PeekPair(const CPURegister& dst1,
int offset) {
DCHECK(AreSameSizeAndType(dst1, dst2));
DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
Ldp(dst1, dst2, MemOperand(sp, offset));
}
@ -1412,11 +1388,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
// This method must not be called unless the current stack pointer is the
// system stack pointer (csp).
DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
MemOperand tos(sp, -2 * static_cast<int>(kXRegSize), PreIndex);
stp(d14, d15, tos);
stp(d12, d13, tos);
@ -1436,11 +1408,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
// Ensure that the macro-assembler doesn't use any scratch registers.
InstructionAccurateScope scope(this);
// This method must not be called unless the current stack pointer is the
// system stack pointer (csp).
DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, 2 * kXRegSize, PostIndex);
MemOperand tos(sp, 2 * kXRegSize, PostIndex);
ldp(x19, x20, tos);
ldp(x21, x22, tos);
@ -1455,44 +1423,15 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(d14, d15, tos);
}
void TurboAssembler::AssertStackConsistency() {
// Avoid emitting code when !use_real_abort() since non-real aborts cause too
// much code to be generated.
void TurboAssembler::AssertSpAligned() {
if (emit_debug_code() && use_real_aborts()) {
if (csp.Is(StackPointer())) {
// Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
// can't check the alignment of csp without using a scratch register (or
// clobbering the flags), but the processor (or simulator) will abort if
// it is not properly aligned during a load.
ldr(xzr, MemOperand(csp, 0));
}
if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
Label ok;
// Check that csp <= StackPointer(), preserving all registers and NZCV.
sub(StackPointer(), csp, StackPointer());
cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
// Avoid generating AssertStackConsistency checks for the Push in Abort.
{ DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
// Restore StackPointer().
sub(StackPointer(), csp, StackPointer());
Abort(AbortReason::kTheCurrentStackPointerIsBelowCsp);
}
bind(&ok);
// Restore StackPointer().
sub(StackPointer(), csp, StackPointer());
}
}
}
void TurboAssembler::AssertCspAligned() {
if (emit_debug_code() && use_real_aborts()) {
// TODO(titzer): use a real assert for alignment check?
// Arm64 requires the stack pointer to be 16-byte aligned prior to address
// calculation.
UseScratchRegisterScope scope(this);
Register temp = scope.AcquireX();
ldr(temp, MemOperand(csp));
Mov(temp, sp);
Tst(temp, 15);
Check(eq, AbortReason::kUnexpectedStackPointer);
}
}
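A note on why the new AssertSpAligned works: Tst(temp, 15) computes temp & 15 and sets the flags, so `eq` holds exactly when the low four bits are zero, i.e. the pointer is 16-byte aligned. The same predicate as a tiny stand-alone sketch:

#include <cassert>
#include <cstdint>

// A 16-byte-aligned address has its low four bits clear, so ANDing with 15
// yields zero. This is what the Tst/Check pair above verifies at run time.
bool IsSpAligned(uintptr_t sp) { return (sp & 15) == 0; }

int main() {
  assert(IsSpAligned(0x1000));   // low nibble 0 -> aligned
  assert(!IsSpAligned(0x1008));  // low nibble 8 -> misaligned
}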
@ -1568,11 +1507,11 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
}
void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
Add(dst, StackPointer(), slot_offset << kPointerSizeLog2);
Add(dst, sp, slot_offset << kPointerSizeLog2);
}
void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
Add(dst, StackPointer(), Operand(slot_offset, LSL, kPointerSizeLog2));
Add(dst, sp, Operand(slot_offset, LSL, kPointerSizeLog2));
}
void TurboAssembler::AssertFPCRState(Register fpcr) {
@ -1630,6 +1569,34 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
void TurboAssembler::Swap(Register lhs, Register rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
DCHECK(!lhs.Is(rhs));
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Mov(temp, rhs);
Mov(rhs, lhs);
Mov(lhs, temp);
}
void TurboAssembler::Swap(VRegister lhs, VRegister rhs) {
DCHECK(lhs.IsSameSizeAndType(rhs));
DCHECK(!lhs.Is(rhs));
UseScratchRegisterScope temps(this);
VRegister temp = VRegister::no_reg();
if (lhs.IsS()) {
temp = temps.AcquireS();
} else if (lhs.IsD()) {
temp = temps.AcquireD();
} else {
DCHECK(lhs.IsQ());
temp = temps.AcquireQ();
}
Mov(temp, rhs);
Mov(rhs, lhs);
Mov(lhs, temp);
}
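An aside on the new Swap helpers: with no spare architectural register to hand, the swap is three moves through a scratch register acquired from the scope, and the operands must be distinct. The same shape in portable C++, as an illustrative sketch:

#include <cassert>

// Mirrors the Mov(temp, rhs); Mov(rhs, lhs); Mov(lhs, temp) sequence above:
// a three-move swap through a temporary, valid only for distinct operands.
template <typename T>
void SwapViaTemp(T& lhs, T& rhs) {
  assert(&lhs != &rhs);  // the DCHECK(!lhs.Is(rhs)) analogue
  T temp = rhs;
  rhs = lhs;
  lhs = temp;
}

int main() {
  int a = 1, b = 2;
  SwapViaTemp(a, b);
  assert(a == 2 && b == 1);
}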
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -1792,6 +1759,12 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
void MacroAssembler::JumpToInstructionStream(const InstructionStream* stream) {
uint64_t bytes_address = reinterpret_cast<uint64_t>(stream->bytes());
Mov(kOffHeapTrampolineRegister, bytes_address);
Br(kOffHeapTrampolineRegister);
}
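For readers new to off-heap builtins: the trampoline materializes the raw start address of the instruction stream in a dedicated register and branches there. A hedged sketch of the same control transfer in plain C++ (names hypothetical; a real off-heap blob must of course live in executable memory, and the cast between integer and function pointer is conditionally-supported but works on mainstream platforms):

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for an off-heap instruction stream entry point.
void TargetStub() { std::puts("reached off-heap code"); }

using OffHeapEntry = void (*)();

// Models the Mov/Br pair above: materialize the blob's raw start address,
// then transfer control to it. The real trampoline tail-branches (Br) rather
// than calling, so no return address is pushed.
void JumpToOffHeap(uintptr_t bytes_address) {
  reinterpret_cast<OffHeapEntry>(bytes_address)();
}

int main() {
  JumpToOffHeap(reinterpret_cast<uintptr_t>(&TargetStub));
}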
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
const Runtime::Function* function = Runtime::FunctionForId(fid);
DCHECK_EQ(1, function->result_size);
@ -1927,13 +1900,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
Bind(&start_call);
#endif
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
DCHECK(rmode != RelocInfo::NONE32);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
if (rmode == RelocInfo::NONE64) {
if (RelocInfo::IsNone(rmode)) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
@ -2009,62 +1979,15 @@ int TurboAssembler::CallSize(Label* target) {
int TurboAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
USE(target);
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
DCHECK(rmode != RelocInfo::NONE32);
if (rmode == RelocInfo::NONE64) {
return kCallSizeWithoutRelocation;
} else {
return kCallSizeWithRelocation;
}
return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation
: kCallSizeWithRelocation;
}
int TurboAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) {
USE(code);
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
DCHECK(rmode != RelocInfo::NONE32);
if (rmode == RelocInfo::NONE64) {
return kCallSizeWithoutRelocation;
} else {
return kCallSizeWithRelocation;
}
return RelocInfo::IsNone(rmode) ? kCallSizeWithoutRelocation
: kCallSizeWithRelocation;
}
void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type) {
Label on_not_heap_number;
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(object, &on_not_heap_number);
}
AssertNotSmi(object);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
Bind(&on_not_heap_number);
}
void MacroAssembler::JumpIfNotHeapNumber(Register object,
Label* on_not_heap_number,
SmiCheckType smi_check_type) {
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(object, on_not_heap_number);
}
AssertNotSmi(object);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
}
void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
@ -2110,12 +2033,10 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
Add(src_reg, StackPointer(),
Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
Add(src_reg, sp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
Add(src_reg, src_reg, kPointerSize);
} else {
Add(src_reg, StackPointer(),
(callee_args_count.immediate() + 1) * kPointerSize);
Add(src_reg, sp, (callee_args_count.immediate() + 1) * kPointerSize);
}
// Round src_reg up to a multiple of 16 bytes, so we include any potential
@ -2145,12 +2066,11 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
Cmp(StackPointer(), src_reg);
Cmp(sp, src_reg);
B(ne, &loop);
// Leave current frame.
Mov(StackPointer(), dst_reg);
AssertStackConsistency();
Mov(sp, dst_reg);
}
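A note on the tail-call shuffle above: the callee's arguments are slid upwards over the caller's dead frame (the stack grows down, so the caller's frame sits at higher addresses), walking from high addresses to low so the overlapping regions don't clobber each other, and then sp is snapped to the new frame top. A simplified sketch with word-sized slots and a hypothetical layout:

#include <cassert>
#include <cstdint>

// Simplified model of the Ldr/Str loop above: copy `count` slots ending at
// src_end so they end at dst_end, walking from high to low addresses (the
// PreIndex -kPointerSize pattern). Safe for overlap when dst is above src.
uint64_t* MoveFrameUp(uint64_t* dst_end, uint64_t* src_end, int count) {
  for (int i = 0; i < count; i++) {
    *--dst_end = *--src_end;
  }
  return dst_end;  // the new stack pointer, as in Mov(sp, dst_reg)
}

int main() {
  // Stack grows down: the caller's (dead) frame occupies the top slots.
  uint64_t stack[8] = {0, 0, 7, 8, 9, 1, 2, 3};  // args 7,8,9 below caller junk
  // Slide the three argument slots (indices 2..4) up over the caller's frame.
  uint64_t* new_sp = MoveFrameUp(&stack[8], &stack[5], 3);
  assert(new_sp == &stack[5]);
  assert(stack[5] == 7 && stack[6] == 8 && stack[7] == 9);
}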
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
@ -2224,12 +2144,28 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_hook;
Label skip_hook, call_hook;
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(isolate());
Mov(x4, Operand(debug_is_active));
Ldrsb(x4, MemOperand(x4));
Cbz(x4, &skip_hook);
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
Cbz(x4, &skip_hook);
Cbnz(x4, &call_hook);
Ldr(x4, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset));
Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kDebugInfoOffset));
JumpIfSmi(x4, &skip_hook);
Ldr(x4, FieldMemOperand(x4, DebugInfo::kFlagsOffset));
Tst(x4, Operand(Smi::FromInt(DebugInfo::kBreakAtEntry)));
B(eq, &skip_hook);
bind(&call_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@ -2284,7 +2220,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// We call indirectly through the code field in the function to
// allow recompilation to take effect without changing any of the
// call sites.
Register code = x4;
Register code = kJavaScriptCallCodeStartRegister;
Ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset));
Add(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
if (flag == CALL_FUNCTION) {
@ -2343,16 +2279,6 @@ void MacroAssembler::InvokeFunction(Register function,
InvokeFunctionCode(function, no_reg, expected, actual, flag);
}
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
LoadObject(x1, function);
InvokeFunction(x1, expected, actual, flag);
}
void TurboAssembler::TryConvertDoubleToInt64(Register result,
DoubleRegister double_input,
Label* done) {
@ -2402,7 +2328,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
void TurboAssembler::Prologue() {
Push(lr, fp, cp, x1);
Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
}
void TurboAssembler::EnterFrame(StackFrame::Type type) {
@ -2414,21 +2340,20 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Mov(type_reg, StackFrame::TypeToMarker(type));
Mov(code_reg, Operand(CodeObject()));
Push(lr, fp, type_reg, code_reg);
Add(fp, StackPointer(), InternalFrameConstants::kFixedFrameSizeFromFp);
Add(fp, sp, InternalFrameConstants::kFixedFrameSizeFromFp);
// sp[4] : lr
// sp[3] : fp
// sp[1] : type
// sp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
Mov(fp, csp);
Mov(fp, sp);
Push(type_reg, padreg);
// csp[3] : lr
// csp[2] : fp
// csp[1] : type
// csp[0] : for alignment
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
// sp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
Mov(type_reg, StackFrame::TypeToMarker(type));
@ -2439,8 +2364,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
Add(fp, StackPointer(),
TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
Add(fp, sp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
@ -2450,15 +2374,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(csp, fp);
AssertStackConsistency();
Mov(sp, fp);
Pop(fp, lr);
} else {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(StackPointer(), fp);
AssertStackConsistency();
Mov(sp, fp);
Pop(fp, lr);
}
}
@ -2493,7 +2414,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Set up the new stack frame.
Push(lr, fp);
Mov(fp, StackPointer());
Mov(fp, sp);
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch, xzr);
Mov(scratch, Operand(CodeObject()));
@ -2540,13 +2461,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// sp[8]: Extra space reserved for caller (if extra_space != 0).
// sp -> sp[0]: Space reserved for the return address.
DCHECK(csp.Is(StackPointer()));
// ExitFrame::GetStateForFramePointer expects to find the return address at
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
// padding can vary.
Add(scratch, csp, kXRegSize);
Add(scratch, sp, kXRegSize);
Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
@ -2555,8 +2474,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
const Register& scratch2) {
DCHECK(csp.Is(StackPointer()));
if (restore_doubles) {
ExitFrameRestoreFPRegs();
}
@ -2582,8 +2499,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
Mov(csp, fp);
AssertStackConsistency();
Mov(sp, fp);
Pop(fp, lr);
}
@ -2752,7 +2668,7 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// registers are saved. The following registers are excluded:
// - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
// the macro assembler.
// - x31 (csp) because the system stack pointer doesn't need to be included
// - x31 (sp) because the system stack pointer doesn't need to be included
// in safepoint registers.
//
// This function implements the mapping of register code to index into the
@ -3052,7 +2968,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
DCHECK(!kCallerSaved.IncludesAliasOf(StackPointer()));
DCHECK(!kCallerSaved.IncludesAliasOf(sp));
// The provided arguments, and their proper procedure-call standard registers.
CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
@ -3164,12 +3080,6 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
Bind(&after_data);
}
// We don't pass any arguments on the stack, but we still need to align the C
// stack pointer to a 16-byte boundary for PCS compliance.
if (!csp.Is(StackPointer())) {
Bic(csp, StackPointer(), 0xF);
}
CallPrintf(arg_count, pcs);
}
@ -3208,14 +3118,6 @@ void MacroAssembler::Printf(const char * format,
CPURegister arg1,
CPURegister arg2,
CPURegister arg3) {
// We can only print sp if it is the current stack pointer.
if (!csp.Is(StackPointer())) {
DCHECK(!csp.Aliases(arg0));
DCHECK(!csp.Aliases(arg1));
DCHECK(!csp.Aliases(arg2));
DCHECK(!csp.Aliases(arg3));
}
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
RegList old_tmp_list = TmpList()->list();
@ -3224,8 +3126,8 @@ void MacroAssembler::Printf(const char * format,
FPTmpList()->set_list(0);
// Preserve all caller-saved registers as well as NZCV.
// If csp is the stack pointer, PushCPURegList asserts that the size of each
// list is a multiple of 16 bytes.
// PushCPURegList asserts that the size of each list is a multiple of 16
// bytes.
PushCPURegList(kCallerSaved);
PushCPURegList(kCallerSavedV);
@ -3241,15 +3143,15 @@ void MacroAssembler::Printf(const char * format,
// If any of the arguments are the current stack pointer, allocate a new
// register for them, and adjust the value to compensate for pushing the
// caller-saved registers.
bool arg0_sp = StackPointer().Aliases(arg0);
bool arg1_sp = StackPointer().Aliases(arg1);
bool arg2_sp = StackPointer().Aliases(arg2);
bool arg3_sp = StackPointer().Aliases(arg3);
bool arg0_sp = sp.Aliases(arg0);
bool arg1_sp = sp.Aliases(arg1);
bool arg2_sp = sp.Aliases(arg2);
bool arg3_sp = sp.Aliases(arg3);
if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
// Allocate a register to hold the original stack pointer value, to pass
// to PrintfNoPreserve as an argument.
Register arg_sp = temps.AcquireX();
Add(arg_sp, StackPointer(),
Add(arg_sp, sp,
kCallerSaved.TotalSizeInBytes() + kCallerSavedV.TotalSizeInBytes());
if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
@ -3302,7 +3204,7 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable(
CPURegList* available) {
CHECK(!available->IsEmpty());
CPURegister result = available->PopLowestIndex();
DCHECK(!AreAliased(result, xzr, csp));
DCHECK(!AreAliased(result, xzr, sp));
return result;
}
@ -3359,6 +3261,14 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
}
}
void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
// We can use adr to load a pc relative location.
adr(rd, -pc_offset());
}
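Why adr(rd, -pc_offset()) works: at emission time, pc_offset() is the number of bytes of this code object already emitted before the adr, and adr materializes the current PC plus a signed immediate, so PC - pc_offset() lands exactly on the first instruction. A worked sanity check with hypothetical numbers:

#include <cassert>
#include <cstdint>

int main() {
  // Suppose the code object starts at 0x4000 and 0x120 bytes have already
  // been emitted when the adr is assembled.
  uintptr_t code_start = 0x4000;
  int pc_offset = 0x120;                  // Assembler::pc_offset()
  uintptr_t pc = code_start + pc_offset;  // address of the adr itself
  uintptr_t rd = pc - pc_offset;          // == adr's pc + (-pc_offset)
  assert(rd == code_start);
}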
void TurboAssembler::ResetSpeculationPoisonRegister() {
Mov(kSpeculationPoisonRegister, -1);
}
#undef __

View File

@ -47,12 +47,15 @@ namespace internal {
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kAllocateSizeRegister x1
#define kSpeculationPoisonRegister x18
#define kInterpreterAccumulatorRegister x0
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kJavaScriptCallArgCountRegister x0
#define kJavaScriptCallCodeStartRegister x2
#define kJavaScriptCallNewTargetRegister x3
#define kOffHeapTrampolineRegister ip0
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
@ -254,6 +257,10 @@ class TurboAssembler : public Assembler {
void Move(Register dst, Handle<HeapObject> x);
void Move(Register dst, Smi* src);
// Register swap. Note that the register operands should be distinct.
void Swap(Register lhs, Register rhs);
void Swap(VRegister lhs, VRegister rhs);
// NEON by element instructions.
#define NEON_BYELEMENT_MACRO_LIST(V) \
V(fmla, Fmla) \
@ -549,6 +556,11 @@ class TurboAssembler : public Assembler {
void Cbnz(const Register& rt, Label* label);
void Cbz(const Register& rt, Label* label);
inline void Dmb(BarrierDomain domain, BarrierType type);
inline void Dsb(BarrierDomain domain, BarrierType type);
inline void Isb();
inline void Csdb();
bool AllowThisStubCall(CodeStub* stub);
void CallStubDelayed(CodeStub* stub);
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
@ -581,20 +593,6 @@ class TurboAssembler : public Assembler {
// Print a message to stderr and abort execution.
void Abort(AbortReason reason);
// If emit_debug_code() is true, emit a run-time check to ensure that
// StackPointer() does not point below the system stack pointer.
//
// Whilst it is architecturally legal for StackPointer() to point below csp,
// it can be evidence of a potential bug because the ABI forbids accesses
// below csp.
//
// If StackPointer() is the system stack pointer (csp), then csp will be
// dereferenced to cause the processor (or simulator) to abort if it is not
// properly aligned.
//
// If emit_debug_code() is false, this emits no code.
void AssertStackConsistency();
// Remaining instructions are simple pass-through calls to the assembler.
inline void Asr(const Register& rd, const Register& rn, unsigned shift);
inline void Asr(const Register& rd, const Register& rn, const Register& rm);
@ -614,9 +612,6 @@ class TurboAssembler : public Assembler {
static CPURegList DefaultTmpList();
static CPURegList DefaultFPTmpList();
// Return the stack pointer.
inline const Register& StackPointer() const { return csp; }
// Move macros.
inline void Mvn(const Register& rd, uint64_t imm);
void Mvn(const Register& rd, const Operand& operand);
@ -650,9 +645,11 @@ class TurboAssembler : public Assembler {
inline void Cmp(const Register& rn, const Operand& operand);
inline void Subs(const Register& rd, const Register& rn,
const Operand& operand);
void Csel(const Register& rd, const Register& rn, const Operand& operand,
Condition cond);
// Emits a runtime assert that the CSP is aligned.
void AssertCspAligned();
// Emits a runtime assert that the stack pointer is aligned.
void AssertSpAligned();
// Copy slot_count stack slots from the stack offset specified by src to
// the stack offset specified by dst. The offsets and count are expressed in
@ -687,17 +684,14 @@ class TurboAssembler : public Assembler {
// Load a literal from the inline constant pool.
inline void Ldr(const CPURegister& rt, const Operand& imm);
// Helper function for double immediate.
inline void Ldr(const CPURegister& rt, double imm);
// Claim or drop stack space without actually accessing memory.
//
// In debug mode, both of these will write invalid data into the claimed or
// dropped space.
//
// If the current stack pointer (according to StackPointer()) is csp, then it
// must be aligned to 16 bytes and the size claimed or dropped must be a
// multiple of 16 bytes.
// The stack pointer must be aligned to 16 bytes and the size claimed or
// dropped must be a multiple of 16 bytes.
//
// Note that unit_size must be specified in bytes. For variants which take a
// Register count, the unit size must be a power of two.
@ -724,26 +718,6 @@ class TurboAssembler : public Assembler {
// Push a single argument, with padding, to the stack.
inline void PushArgument(const Register& arg);
// Re-synchronizes the system stack pointer (csp) with the current stack
// pointer (according to StackPointer()).
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
inline void SyncSystemStackPointer();
// Push the system stack pointer (csp) down to allow the same to be done to
// the current stack pointer (according to StackPointer()). This must be
// called _before_ accessing the memory.
//
// This is necessary when pushing or otherwise adding things to the stack, to
// satisfy the AAPCS64 constraint that the memory below the system stack
// pointer is not accessed. The amount pushed will be increased as necessary
// to ensure csp remains aligned to 16 bytes.
//
// This method asserts that StackPointer() is not csp, since the call does
// not make sense in that context.
inline void BumpSystemStackPointer(const Operand& space);
// Add and sub macros.
inline void Add(const Register& rd, const Register& rn,
const Operand& operand);
@ -778,11 +752,6 @@ class TurboAssembler : public Assembler {
// The stack pointer must be aligned to 16 bytes on entry and the total size
// of the specified registers must also be a multiple of 16 bytes.
//
// Even if the current stack pointer is not the system stack pointer (csp),
// Push (and derived methods) will still modify the system stack pointer in
// order to comply with ABI rules about accessing memory below the system
// stack pointer.
//
// Other than the registers passed into Pop, the stack pointer and (possibly)
// the system stack pointer, these methods do not modify any other registers.
void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
@ -1011,17 +980,13 @@ class TurboAssembler : public Assembler {
inline void Clz(const Register& rd, const Register& rn);
// Poke 'src' onto the stack. The offset is in bytes.
//
// If the current stack pointer (according to StackPointer()) is csp, then
// csp must be aligned to 16 bytes.
// Poke 'src' onto the stack. The offset is in bytes. The stack pointer must
// be 16 byte aligned.
void Poke(const CPURegister& src, const Operand& offset);
// Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
// with 'src2' at a higher address than 'src1'. The offset is in bytes.
//
// If the current stack pointer (according to StackPointer()) is csp, then
// csp must be aligned to 16 bytes.
// with 'src2' at a higher address than 'src1'. The offset is in bytes. The
// stack pointer must be 16 byte aligned.
void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
inline void Sbfx(const Register& rd, const Register& rn, unsigned lsb,
@ -1047,7 +1012,9 @@ class TurboAssembler : public Assembler {
void CanonicalizeNaN(const VRegister& dst, const VRegister& src);
void CanonicalizeNaN(const VRegister& reg) { CanonicalizeNaN(reg, reg); }
inline void CmovX(const Register& rd, const Register& rn, Condition cond);
inline void Cset(const Register& rd, Condition cond);
inline void Csetm(const Register& rd, Condition cond);
inline void Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv,
Condition cond);
inline void Csinc(const Register& rd, const Register& rn, const Register& rm,
@ -1233,6 +1200,12 @@ class TurboAssembler : public Assembler {
inline void Fcvtas(const Register& rd, const VRegister& fn);
inline void Fcvtau(const Register& rd, const VRegister& fn);
// Compute the start of the generated instruction stream from the current PC.
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(const Register& rd);
void ResetSpeculationPoisonRegister();
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
@ -1257,8 +1230,8 @@ class TurboAssembler : public Assembler {
// Call Printf. On a native build, a simple call will be generated, but if the
// simulator is being used then a suitable pseudo-instruction is used. The
// arguments and stack (csp) must be prepared by the caller as for a normal
// AAPCS64 call to 'printf'.
// arguments and stack must be prepared by the caller as for a normal AAPCS64
// call to 'printf'.
//
// The 'args' argument should point to an array of variable arguments in their
// proper PCS registers (and in calling order). The argument registers can
@ -1326,8 +1299,6 @@ class MacroAssembler : public TurboAssembler {
inline void Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv,
Condition cond);
void Csel(const Register& rd, const Register& rn, const Operand& operand,
Condition cond);
#define DECLARE_FUNCTION(FN, OP) \
inline void FN(const Register& rs, const Register& rt, const Register& rn);
@ -1344,14 +1315,10 @@ class MacroAssembler : public TurboAssembler {
inline void Cinc(const Register& rd, const Register& rn, Condition cond);
inline void Cinv(const Register& rd, const Register& rn, Condition cond);
inline void CzeroX(const Register& rd, Condition cond);
inline void CmovX(const Register& rd, const Register& rn, Condition cond);
inline void Csetm(const Register& rd, Condition cond);
inline void Csinv(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
inline void Csneg(const Register& rd, const Register& rn, const Register& rm,
Condition cond);
inline void Dmb(BarrierDomain domain, BarrierType type);
inline void Dsb(BarrierDomain domain, BarrierType type);
inline void Extr(const Register& rd, const Register& rn, const Register& rm,
unsigned lsb);
inline void Fcsel(const VRegister& fd, const VRegister& fn,
@ -1394,7 +1361,6 @@ class MacroAssembler : public TurboAssembler {
const VRegister& fm, const VRegister& fa);
inline void Hint(SystemHint code);
inline void Hlt(int code);
inline void Isb();
inline void Ldnp(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& src);
inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
@ -1641,17 +1607,13 @@ class MacroAssembler : public TurboAssembler {
};
// Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
//
// If the current stack pointer (according to StackPointer()) is csp, then
// csp must be aligned to 16 bytes.
// The stack pointer must be aligned to 16 bytes.
void Peek(const CPURegister& dst, const Operand& offset);
// Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
// values peeked will be adjacent, with the value in 'dst2' being from a
// higher address than 'dst1'. The offset is in bytes.
//
// If the current stack pointer (according to StackPointer()) is csp, then
// csp must be aligned to 16 bytes.
// higher address than 'dst1'. The offset is in bytes. The stack pointer must
// be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
// Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
@ -1704,10 +1666,6 @@ class MacroAssembler : public TurboAssembler {
// thus come from higher addresses.
void PopCalleeSavedRegisters();
// Align csp for a frame, as per ActivationFrameAlignment, and make it the
// current stack pointer.
inline void AlignAndSetCSPForFrame();
// Helpers ------------------------------------------------------------------
static int SafepointRegisterStackIndex(int reg_code);
@ -1770,11 +1728,6 @@ class MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object);
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
// Try to represent a double as a signed 64-bit int.
// This succeeds if the result compares equal to the input, so inputs of -0.0
// are represented as 0 and handled as a success.
@ -1817,6 +1770,9 @@ class MacroAssembler : public TurboAssembler {
void JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame = false);
// Generates a trampoline to jump to the off-heap instruction stream.
void JumpToInstructionStream(const InstructionStream* stream);
// Registers used through the invocation chain are hard-coded.
// We force passing the parameters to ensure the contracts are correctly
// honoured by the caller.
@ -1841,9 +1797,6 @@ class MacroAssembler : public TurboAssembler {
const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Register function, const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag);
// ---- Code generation helpers ----
@ -1940,12 +1893,12 @@ class MacroAssembler : public TurboAssembler {
// Set up a stack frame and registers as follows:
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: SPOffset (new csp)
// fp[-8]: SPOffset (new sp)
// fp[-16]: CodeObject()
// fp[-16 - fp-size]: Saved doubles, if saved_doubles is true.
// csp[8]: Memory reserved for the caller if extra_space != 0.
// sp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
// csp -> csp[0]: Space reserved for the return address.
// sp -> sp[0]: Space reserved for the return address.
//
// This function also stores the new frame information in the top frame, so
// that the new frame becomes the current frame.
@ -1960,8 +1913,6 @@ class MacroAssembler : public TurboAssembler {
// * Preserved doubles are restored (if restore_doubles is true).
// * The frame information is removed from the top frame.
// * The exit frame is dropped.
//
// The stack pointer must be csp on entry.
void LeaveExitFrame(bool save_doubles, const Register& scratch,
const Register& scratch2);
@ -2030,11 +1981,6 @@ class MacroAssembler : public TurboAssembler {
// (such as %e, %f or %g) are VRegisters, and that arguments for integer
// placeholders are Registers.
//
// At the moment it is only possible to print the value of csp if it is the
// current stack pointer. Otherwise, the MacroAssembler will automatically
// update csp on every push (using BumpSystemStackPointer), so determining its
// value is difficult.
//
// Format placeholders that refer to more than one argument, or to a specific
// argument, are not supported. This includes formats like "%1$d" or "%.*d".
//
@ -2169,6 +2115,7 @@ class UseScratchRegisterScope {
Register AcquireX() { return AcquireNextAvailable(available_).X(); }
VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
VRegister AcquireV(VectorFormat format) {
return VRegister::Create(AcquireNextAvailable(availablefp_).code(), format);
}
@ -2210,7 +2157,7 @@ class InlineSmiCheckInfo {
// Use MacroAssembler::InlineData to emit information about patchable inline
// SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
// indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
// indicate that there is no inline SMI check. Note that 'reg' cannot be sp.
//
// The generated patch information can be read using the InlineSMICheckInfo
// class.
@ -2230,8 +2177,8 @@ class InlineSmiCheckInfo {
// Fields in the data encoded by InlineData.
// A width of 5 (Rd_width) for the SMI register preclues the use of csp,
// since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
// A width of 5 (Rd_width) for the SMI register precludes the use of sp,
// since kSPRegInternalCode is 63. However, sp should never hold a SMI or be
// used in a patchable check. The Emit() method checks this.
//
// Note that the total size of the fields is restricted by the underlying

View File

@ -626,16 +626,15 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
}
const char* Simulator::xreg_names[] = {
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8",
"x9", "x10", "x11", "x12", "x13", "x14", "x15", "ip0", "ip1",
"x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
"cp", "x28", "fp", "lr", "xzr", "csp"};
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", "x9", "x10",
"x11", "x12", "x13", "x14", "x15", "ip0", "ip1", "x18", "x19", "x20", "x21",
"x22", "x23", "x24", "x25", "x26", "cp", "x28", "fp", "lr", "xzr", "sp"};
const char* Simulator::wreg_names[] = {
"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8",
"w9", "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17",
"w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26",
"wcp", "w28", "wfp", "wlr", "wzr", "wcsp"};
"wcp", "w28", "wfp", "wlr", "wzr", "wsp"};
const char* Simulator::sreg_names[] = {
"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@ -768,7 +767,7 @@ int Simulator::CodeFromName(const char* name) {
return i;
}
}
if ((strcmp("csp", name) == 0) || (strcmp("wcsp", name) == 0)) {
if ((strcmp("sp", name) == 0) || (strcmp("wsp", name) == 0)) {
return kSPRegInternalCode;
}
return -1;
@ -1450,7 +1449,7 @@ void Simulator::VisitUnconditionalBranch(Instruction* instr) {
switch (instr->Mask(UnconditionalBranchMask)) {
case BL:
set_lr(instr->following());
// Fall through.
V8_FALLTHROUGH;
case B:
set_pc(instr->ImmPCOffsetTarget());
break;
@ -1478,7 +1477,7 @@ void Simulator::VisitUnconditionalBranchToRegister(Instruction* instr) {
// this, but if we do trap to allow debugging.
Debug();
}
// Fall through.
V8_FALLTHROUGH;
}
case BR:
case RET: set_pc(target); break;
@ -1630,7 +1629,7 @@ void Simulator::LogicalHelper(Instruction* instr, T op2) {
// Switch on the logical operation, stripping out the NOT bit, as it has a
// different meaning for logical immediate instructions.
switch (instr->Mask(LogicalOpMask & ~NOT)) {
case ANDS: update_flags = true; // Fall through.
case ANDS: update_flags = true; V8_FALLTHROUGH;
case AND: result = op1 & op2; break;
case ORR: result = op1 | op2; break;
case EOR: result = op1 ^ op2; break;
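Context for the repeated "// Fall through." to V8_FALLTHROUGH churn in this file: the macro expands to a compiler-recognized attribute where available (e.g. [[fallthrough]] or [[clang::fallthrough]]), so fall-through warnings stay quiet without relying on a comment convention. A stand-alone sketch of the same pattern in standard C++17, loosely modelled on the ANDS/AND case above:

#include <cassert>

// ANDS behaves like AND but additionally updates flags; expressing that as a
// deliberate fall-through, annotated so the compiler knows it is intended.
int Logical(int op, int a, int b, bool* update_flags) {
  int result = 0;
  switch (op) {
    case 0:  // ANDS
      *update_flags = true;
      [[fallthrough]];  // roughly what V8_FALLTHROUGH expands to in C++17
    case 1:  // AND
      result = a & b;
      break;
    case 2:  // ORR
      result = a | b;
      break;
  }
  return result;
}

int main() {
  bool flags = false;
  assert(Logical(0, 6, 3, &flags) == 2 && flags);
  assert(Logical(1, 6, 3, &flags) == 2);
}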
@ -2956,7 +2955,9 @@ void Simulator::VisitSystem(Instruction* instr) {
} else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
DCHECK(instr->Mask(SystemHintMask) == HINT);
switch (instr->ImmHint()) {
case NOP: break;
case NOP:
case CSDB:
break;
default: UNIMPLEMENTED();
}
} else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
@ -2996,15 +2997,15 @@ bool Simulator::GetValue(const char* desc, int64_t* value) {
bool Simulator::PrintValue(const char* desc) {
if (strcmp(desc, "csp") == 0) {
if (strcmp(desc, "sp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n",
clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
PrintF(stream_, "%s sp:%s 0x%016" PRIx64 "%s\n", clr_reg_name,
clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
return true;
} else if (strcmp(desc, "wcsp") == 0) {
} else if (strcmp(desc, "wsp") == 0) {
DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n",
clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
PrintF(stream_, "%s wsp:%s 0x%08" PRIx32 "%s\n", clr_reg_name,
clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
return true;
}
@ -4396,15 +4397,18 @@ void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
case NEON_LD1_4v:
case NEON_LD1_4v_post:
ld1(vf, vreg(reg[3]), addr[3]);
count++; // Fall through.
count++;
V8_FALLTHROUGH;
case NEON_LD1_3v:
case NEON_LD1_3v_post:
ld1(vf, vreg(reg[2]), addr[2]);
count++; // Fall through.
count++;
V8_FALLTHROUGH;
case NEON_LD1_2v:
case NEON_LD1_2v_post:
ld1(vf, vreg(reg[1]), addr[1]);
count++; // Fall through.
count++;
V8_FALLTHROUGH;
case NEON_LD1_1v:
case NEON_LD1_1v_post:
ld1(vf, vreg(reg[0]), addr[0]);
@ -4412,15 +4416,18 @@ void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
case NEON_ST1_4v:
case NEON_ST1_4v_post:
st1(vf, vreg(reg[3]), addr[3]);
count++; // Fall through.
count++;
V8_FALLTHROUGH;
case NEON_ST1_3v:
case NEON_ST1_3v_post:
st1(vf, vreg(reg[2]), addr[2]);
count++; // Fall through.
count++;
V8_FALLTHROUGH;
case NEON_ST1_2v:
case NEON_ST1_2v_post:
st1(vf, vreg(reg[1]), addr[1]);
count++; // Fall through.
count++;
V8_FALLTHROUGH;
case NEON_ST1_1v:
case NEON_ST1_1v_post:
st1(vf, vreg(reg[0]), addr[0]);
@ -4533,7 +4540,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_b_post:
case NEON_LD4_b:
case NEON_LD4_b_post:
do_load = true; // Fall through.
do_load = true;
V8_FALLTHROUGH;
case NEON_ST1_b:
case NEON_ST1_b_post:
case NEON_ST2_b:
@ -4552,7 +4560,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_h_post:
case NEON_LD4_h:
case NEON_LD4_h_post:
do_load = true; // Fall through.
do_load = true;
V8_FALLTHROUGH;
case NEON_ST1_h:
case NEON_ST1_h_post:
case NEON_ST2_h:
@ -4572,7 +4581,8 @@ void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
case NEON_LD3_s_post:
case NEON_LD4_s:
case NEON_LD4_s_post:
do_load = true; // Fall through.
do_load = true;
V8_FALLTHROUGH;
case NEON_ST1_s:
case NEON_ST1_s_post:
case NEON_ST2_s:

View File

@ -646,6 +646,7 @@ class LogicVRegister {
class Simulator : public DecoderVisitor, public SimulatorBase {
public:
static void SetRedirectInstruction(Instruction* instruction);
static bool ICacheMatch(void* one, void* two) { return false; }
static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
size_t size) {
USE(i_cache);

View File

@ -3,9 +3,7 @@ set noparent
ahaas@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
mtrofin@chromium.org
mstarzinger@chromium.org
rossberg@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly

View File

@ -21,8 +21,7 @@
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-objects-inl.h"
@ -66,18 +65,21 @@ bool AreStdlibMembersValid(Isolate* isolate, Handle<JSReceiver> stdlib,
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
if (!value->IsNaN()) return false;
}
#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
STATIC_CHAR_VECTOR(#fname))); \
Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
if (!value->IsJSFunction()) return false; \
Handle<JSFunction> func = Handle<JSFunction>::cast(value); \
if (func->shared()->code() != \
isolate->builtins()->builtin(Builtins::kMath##FName)) { \
return false; \
} \
#define STDLIB_MATH_FUNC(fname, FName, ignore1, ignore2) \
if (members.Contains(wasm::AsmJsParser::StandardMember::kMath##FName)) { \
members.Remove(wasm::AsmJsParser::StandardMember::kMath##FName); \
Handle<Name> name(isolate->factory()->InternalizeOneByteString( \
STATIC_CHAR_VECTOR(#fname))); \
Handle<Object> value = StdlibMathMember(isolate, stdlib, name); \
if (!value->IsJSFunction()) return false; \
SharedFunctionInfo* shared = Handle<JSFunction>::cast(value)->shared(); \
if (shared->HasLazyDeserializationBuiltinId()) { \
if (shared->lazy_deserialization_builtin_id() != Builtins::kMath##FName) \
return false; \
} else if (shared->code() != \
isolate->builtins()->builtin(Builtins::kMath##FName)) { \
return false; \
} \
}
STDLIB_MATH_FUNCTION_LIST(STDLIB_MATH_FUNC)
#undef STDLIB_MATH_FUNC
@ -284,11 +286,12 @@ CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(Isolate* isolate) {
wasm::ErrorThrower thrower(isolate, "AsmJs::Compile");
Handle<WasmModuleObject> compiled =
SyncCompileTranslatedAsmJs(
isolate, &thrower,
wasm::ModuleWireBytes(module_->begin(), module_->end()),
parse_info()->script(),
Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
isolate->wasm_engine()
->SyncCompileTranslatedAsmJs(
isolate, &thrower,
wasm::ModuleWireBytes(module_->begin(), module_->end()),
parse_info()->script(),
Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
.ToHandleChecked();
DCHECK(!thrower.error());
compile_time_ = compile_timer.Elapsed().InMillisecondsF();
@ -389,7 +392,8 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
wasm::ErrorThrower thrower(isolate, "AsmJs::Instantiate");
MaybeHandle<Object> maybe_module_object =
wasm::SyncInstantiate(isolate, &thrower, module, foreign, memory);
isolate->wasm_engine()->SyncInstantiate(isolate, &thrower, module,
foreign, memory);
if (maybe_module_object.is_null()) {
// An exception caused by the module start function will be set as pending
// and bypass the {ErrorThrower}, this happens in case of a stack overflow.

View File

@ -745,6 +745,12 @@ void AsmJsParser::ValidateFunction() {
CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
ValidateFunctionParams(&params);
// Check against limit on number of parameters.
if (params.size() >= kV8MaxWasmFunctionParams) {
FAIL("Number of parameters exceeds internal limit");
}
CachedVector<ValueType> locals(cached_valuetype_vectors_);
ValidateFunctionLocals(params.size(), &locals);

View File

@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ASMJS_SWITCH_LOGIC_H
#define V8_ASMJS_SWITCH_LOGIC_H
#ifndef V8_ASMJS_SWITCH_LOGIC_H_
#define V8_ASMJS_SWITCH_LOGIC_H_
#include "src/globals.h"
#include "src/zone/zone-containers.h"
@ -30,4 +30,4 @@ V8_EXPORT_PRIVATE CaseNode* OrderCases(ZoneVector<int>* cases, Zone* zone);
} // namespace internal
} // namespace v8
#endif // V8_ASMJS_SWITCH_LOGIC_H
#endif // V8_ASMJS_SWITCH_LOGIC_H_

View File

@ -176,12 +176,12 @@ AssemblerBase::~AssemblerBase() {
if (own_buffer_) DeleteArray(buffer_);
}
void AssemblerBase::FlushICache(Isolate* isolate, void* start, size_t size) {
void AssemblerBase::FlushICache(void* start, size_t size) {
if (size == 0) return;
#if defined(USE_SIMULATOR)
base::LockGuard<base::Mutex> lock_guard(isolate->simulator_i_cache_mutex());
Simulator::FlushICache(isolate->simulator_i_cache(), start, size);
base::LockGuard<base::Mutex> lock_guard(Simulator::i_cache_mutex());
Simulator::FlushICache(Simulator::i_cache(), start, size);
#else
CpuFeatures::FlushICache(start, size);
#endif // USE_SIMULATOR
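A note on the FlushICache signature change: the Isolate parameter could be dropped because the simulator's i-cache and its mutex moved to process-global state. The shape of the compile-time dispatch, as a hedged sketch (all names here are hypothetical stand-ins, not V8 API):

#include <cstddef>
#include <mutex>

namespace sketch {

// Stand-in for Simulator::i_cache_mutex(): one process-global lock.
std::mutex& ICacheMutex() {
  static std::mutex mutex;
  return mutex;
}

void SimulatorFlush(void*, size_t) { /* invalidate simulated cache lines */ }
void CpuFlush(void*, size_t) { /* e.g. a builtin cache-flush intrinsic */ }

// Under USE_SIMULATOR, flush the software i-cache under the global mutex;
// otherwise defer to the real CPU flush, mirroring the function above.
void FlushICache(void* start, size_t size) {
  if (size == 0) return;
#if defined(USE_SIMULATOR)
  std::lock_guard<std::mutex> lock(ICacheMutex());
  SimulatorFlush(start, size);
#else
  CpuFlush(start, size);
#endif
}

}  // namespace sketch

int main() {
  char buf[16];
  sketch::FlushICache(buf, sizeof(buf));
}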
@ -195,9 +195,6 @@ void AssemblerBase::Print(Isolate* isolate) {
// -----------------------------------------------------------------------------
// Implementation of PredictableCodeSizeScope
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler)
: PredictableCodeSizeScope(assembler, -1) {}
PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
int expected_size)
: assembler_(assembler),
@ -208,10 +205,7 @@ PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
}
PredictableCodeSizeScope::~PredictableCodeSizeScope() {
// TODO(svenpanne) Remove the 'if' when everything works.
if (expected_size_ >= 0) {
CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
}
CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
assembler_->set_predictable_code_size(old_value_);
}
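On the PredictableCodeSizeScope tightening above: the -1 "unknown size" escape hatch is gone, so every scope now names its expected size up front and the destructor hard-checks it. A minimal RAII sketch of the same contract, with a hypothetical assembler stand-in:

#include <cassert>

// Stand-in for AssemblerBase: only tracks how many bytes were "emitted".
struct FakeAssembler {
  int pc_offset = 0;
  void Emit(int bytes) { pc_offset += bytes; }
};

// RAII check mirroring the scope above: record the start offset, then verify
// in the destructor that exactly expected_size bytes were emitted in between.
class PredictableSizeScope {
 public:
  PredictableSizeScope(FakeAssembler* assembler, int expected_size)
      : assembler_(assembler),
        expected_size_(expected_size),
        start_offset_(assembler->pc_offset) {}
  ~PredictableSizeScope() {
    assert(assembler_->pc_offset - start_offset_ == expected_size_);
  }

 private:
  FakeAssembler* const assembler_;
  int const expected_size_;
  int const start_offset_;
};

int main() {
  FakeAssembler masm;
  {
    PredictableSizeScope scope(&masm, 8);
    masm.Emit(4);
    masm.Emit(4);  // emitting any total other than 8 bytes would assert
  }
}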
@ -301,16 +295,16 @@ const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
void RelocInfo::set_wasm_context_reference(Isolate* isolate, Address address,
void RelocInfo::set_wasm_context_reference(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmContextReference(rmode_));
set_embedded_address(isolate, address, icache_flush_mode);
set_embedded_address(address, icache_flush_mode);
}
void RelocInfo::set_global_handle(Isolate* isolate, Address address,
void RelocInfo::set_global_handle(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
set_embedded_address(isolate, address, icache_flush_mode);
set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::wasm_call_address() const {
@ -318,10 +312,10 @@ Address RelocInfo::wasm_call_address() const {
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_wasm_call_address(Isolate* isolate, Address address,
void RelocInfo::set_wasm_call_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_CALL);
Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
Assembler::set_target_address_at(pc_, constant_pool_, address,
icache_flush_mode);
}
@ -341,17 +335,16 @@ Address RelocInfo::wasm_context_reference() const {
}
void RelocInfo::update_wasm_function_table_size_reference(
Isolate* isolate, uint32_t old_size, uint32_t new_size,
ICacheFlushMode icache_flush_mode) {
uint32_t old_size, uint32_t new_size, ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
set_embedded_size(isolate, new_size, icache_flush_mode);
set_embedded_size(new_size, icache_flush_mode);
}
void RelocInfo::set_target_address(Isolate* isolate, Address target,
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
Assembler::set_target_address_at(isolate, pc_, constant_pool_, target,
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
IsCodeTarget(rmode_)) {
@ -449,7 +442,6 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
}
}
last_pc_ = rinfo->pc();
last_mode_ = rmode;
#ifdef DEBUG
DCHECK_LE(begin_pos - pos_, kMaxSize);
#endif
@ -561,7 +553,8 @@ void RelocIterator::next() {
done_ = true;
}
RelocIterator::RelocIterator(Code* code, int mode_mask) {
RelocIterator::RelocIterator(Code* code, int mode_mask)
: mode_mask_(mode_mask) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
@ -569,35 +562,30 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
// Relocation info is read backwards.
pos_ = code->relocation_start() + code->relocation_size();
end_ = code->relocation_start();
done_ = false;
mode_mask_ = mode_mask;
if (mode_mask_ == 0) pos_ = end_;
next();
}
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
: mode_mask_(mode_mask) {
rinfo_.pc_ = desc.buffer;
rinfo_.data_ = 0;
// Relocation info is read backwards.
pos_ = desc.buffer + desc.buffer_size;
end_ = pos_ - desc.reloc_size;
done_ = false;
mode_mask_ = mode_mask;
if (mode_mask_ == 0) pos_ = end_;
next();
}
RelocIterator::RelocIterator(Vector<byte> instructions,
Vector<const byte> reloc_info, Address const_pool,
int mode_mask) {
int mode_mask)
: mode_mask_(mode_mask) {
rinfo_.pc_ = instructions.start();
rinfo_.data_ = 0;
rinfo_.constant_pool_ = const_pool;
rinfo_.flags_ = RelocInfo::kInNativeWasmCode;
// Relocation info is read backwards.
pos_ = reloc_info.start() + reloc_info.size();
end_ = reloc_info.start();
done_ = false;
mode_mask_ = mode_mask;
if (mode_mask_ == 0) pos_ = end_;
next();
}
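Background for the RelocIterator constructor cleanup: relocation info is stored at the end of the code buffer and consumed backwards, which is why all three constructors position pos_ at the buffer end and end_ at its start. A toy backwards cursor showing the same iteration direction, as a sketch:

#include <cassert>
#include <cstdint>

// Toy version of the pattern above: iterate a byte buffer from its end toward
// its start, the direction in which relocation entries are read.
class BackwardsCursor {
 public:
  BackwardsCursor(const uint8_t* start, const uint8_t* end)
      : pos_(end), end_(start) {}
  bool done() const { return pos_ == end_; }
  uint8_t next() {
    assert(!done());
    return *--pos_;  // read backwards, one entry at a time
  }

 private:
  const uint8_t* pos_;  // one past the entry to read next
  const uint8_t* end_;  // lowest address; iteration stops here
};

int main() {
  uint8_t reloc[3] = {1, 2, 3};
  BackwardsCursor it(reloc, reloc + 3);
  assert(it.next() == 3 && it.next() == 2 && it.next() == 1 && it.done());
}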
@ -606,7 +594,7 @@ RelocIterator::RelocIterator(Vector<byte> instructions,
// Implementation of RelocInfo
#ifdef DEBUG
bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
// Ensure there are no code targets or embedded objects present in the
// deoptimization entries, they would require relocation after code
// generation.
@ -621,10 +609,8 @@ bool RelocInfo::RequiresRelocation(Isolate* isolate, const CodeDesc& desc) {
#ifdef ENABLE_DISASSEMBLER
const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
switch (rmode) {
case NONE32:
return "no reloc 32";
case NONE64:
return "no reloc 64";
case NONE:
return "no reloc";
case EMBEDDED_OBJECT:
return "embedded object";
case CODE_TARGET:
@ -686,9 +672,21 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
<< ") (" << static_cast<const void*>(target_external_reference())
<< ")";
} else if (IsCodeTarget(rmode_)) {
Code* code = Code::GetCodeFromTargetAddress(target_address());
os << " (" << Code::Kind2String(code->kind()) << ") ("
<< static_cast<const void*>(target_address()) << ")";
const Address code_target = target_address();
if (flags_ & kInNativeWasmCode) {
os << " (wasm trampoline) ";
} else {
Code* code = Code::GetCodeFromTargetAddress(code_target);
DCHECK(code->IsCode());
os << " (" << Code::Kind2String(code->kind());
if (Builtins::IsBuiltin(code)) {
os << " " << Builtins::name(code->builtin_index());
} else if (code->kind() == Code::STUB) {
os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
}
os << ") ";
}
os << " (" << static_cast<const void*>(target_address()) << ")";
} else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
// Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
@ -744,8 +742,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case WASM_GLOBAL_HANDLE:
case WASM_CALL:
case JS_TO_WASM_CALL:
case NONE32:
case NONE64:
case NONE:
break;
case NUMBER_OF_MODES:
case PC_JUMP:
@ -1465,6 +1462,12 @@ ExternalReference ExternalReference::copy_typed_array_elements_to_typed_array(
Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsToTypedArray)));
}
ExternalReference ExternalReference::copy_typed_array_elements_slice(
Isolate* isolate) {
return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(CopyTypedArrayElementsSlice)));
}
ExternalReference ExternalReference::try_internalize_string_function(
Isolate* isolate) {
return ExternalReference(Redirect(
@ -1877,22 +1880,5 @@ void Assembler::RequestHeapObject(HeapObjectRequest request) {
heap_object_requests_.push_front(request);
}
namespace {
int caller_saved_codes[kNumJSCallerSaved];
}
void SetUpJSCallerSavedCodeData() {
int i = 0;
for (int r = 0; r < kNumRegs; r++)
if ((kJSCallerSaved & (1 << r)) != 0) caller_saved_codes[i++] = r;
DCHECK_EQ(i, kNumJSCallerSaved);
}
int JSCallerSavedCode(int n) {
DCHECK(0 <= n && n < kNumJSCallerSaved);
return caller_saved_codes[n];
}
} // namespace internal
} // namespace v8

View File

@ -57,15 +57,12 @@ class ApiFunction;
namespace internal {
// Forward declarations.
class InstructionStream;
class Isolate;
class SCTableReference;
class SourcePosition;
class StatsCounter;
void SetUpJSCallerSavedCodeData();
// Return the code of the n-th saved register available to JavaScript.
int JSCallerSavedCode(int n);
// -----------------------------------------------------------------------------
// Optimization for far-jmp like instructions that can be replaced by shorter.
@ -162,7 +159,7 @@ class AssemblerBase: public Malloced {
static const int kMinimalBufferSize = 4*KB;
static void FlushICache(Isolate* isolate, void* start, size_t size);
static void FlushICache(void* start, size_t size);
protected:
// The buffer into which code and relocation info are generated. It could
@ -220,16 +217,14 @@ class DontEmitDebugCodeScope BASE_EMBEDDED {
// snapshot and the running VM.
class PredictableCodeSizeScope {
public:
explicit PredictableCodeSizeScope(AssemblerBase* assembler);
PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
~PredictableCodeSizeScope();
void ExpectSize(int expected_size) { expected_size_ = expected_size; }
private:
AssemblerBase* assembler_;
int expected_size_;
int start_offset_;
bool old_value_;
AssemblerBase* const assembler_;
int const expected_size_;
int const start_offset_;
bool const old_value_;
};
@ -252,6 +247,8 @@ class CpuFeatureScope BASE_EMBEDDED {
#else
CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
CheckPolicy check = kCheckSupported) {}
// Define a destructor to avoid unused variable warnings.
~CpuFeatureScope() {}
#endif
};
@ -283,7 +280,7 @@ class CpuFeatures : public AllStatic {
return (supported_ & (1u << f)) != 0;
}
static inline bool SupportsCrankshaft();
static inline bool SupportsOptimizer();
static inline bool SupportsWasmSimd128();
@ -341,6 +338,12 @@ enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };
class RelocInfo {
public:
enum Flag : uint8_t {
kNoFlags = 0,
kInNativeWasmCode = 1u << 0, // Reloc info belongs to native wasm code.
};
typedef base::Flags<Flag> Flags;
// This string is used to add padding comments to the reloc info in cases
// where we are not sure to have enough space for patching in during
// lazy deoptimization. This is the case if we have indirect calls for which
@ -357,7 +360,7 @@ class RelocInfo {
// The maximum pc delta that will use the short encoding.
static const int kMaxSmallPCDelta;
enum Mode {
enum Mode : int8_t {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET,
EMBEDDED_OBJECT,
@ -395,8 +398,7 @@ class RelocInfo {
// Pseudo-types
NUMBER_OF_MODES,
NONE32, // never recorded 32-bit value
NONE64, // never recorded 64-bit value
NONE, // never recorded value
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
@ -456,9 +458,7 @@ class RelocInfo {
static inline bool IsInternalReferenceEncoded(Mode mode) {
return mode == INTERNAL_REFERENCE_ENCODED;
}
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
static inline bool IsNone(Mode mode) { return mode == NONE; }
static inline bool IsWasmContextReference(Mode mode) {
return mode == WASM_CONTEXT_REFERENCE;
}
@ -476,7 +476,7 @@ class RelocInfo {
mode == WASM_CALL || mode == JS_TO_WASM_CALL;
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
static constexpr int ModeMask(Mode mode) { return 1 << mode; }
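The constexpr change to ModeMask lets mode masks fold into compile-time constants. For readers unfamiliar with the idiom, a self-contained sketch of building and testing a one-bit-per-mode mask:

#include <cassert>
#include <cstdint>

enum Mode : int8_t { CODE_TARGET, EMBEDDED_OBJECT, RUNTIME_ENTRY };

// Same shape as RelocInfo::ModeMask: one bit per mode, usable at compile time.
static constexpr int ModeMask(Mode mode) { return 1 << mode; }

int main() {
  constexpr int mask = ModeMask(CODE_TARGET) | ModeMask(RUNTIME_ENTRY);
  static_assert(mask == 0b101, "two bits set");
  assert((mask & ModeMask(EMBEDDED_OBJECT)) == 0);  // filtered out
}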
// Accessors
byte* pc() const { return pc_; }
@ -485,6 +485,9 @@ class RelocInfo {
intptr_t data() const { return data_; }
Code* host() const { return host_; }
Address constant_pool() const { return constant_pool_; }
void set_constant_pool(Address constant_pool) {
constant_pool_ = constant_pool;
}
// Apply a relocation by delta bytes. When the code object is moved, PC
// relative addresses have to be updated as well as absolute addresses
@ -508,25 +511,22 @@ class RelocInfo {
Address wasm_call_address() const;
void set_wasm_context_reference(
Isolate* isolate, Address address,
Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_function_table_size_reference(
Isolate* isolate, uint32_t old_base, uint32_t new_base,
uint32_t old_base, uint32_t new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_target_address(
Isolate* isolate, Address target,
Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_global_handle(
Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_global_handle(Address address, ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED);
void set_wasm_call_address(
Isolate*, Address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_js_to_wasm_address(
Isolate*, Address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@ -539,7 +539,7 @@ class RelocInfo {
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Address target_runtime_entry(Assembler* origin));
INLINE(void set_target_runtime_entry(
Isolate* isolate, Address target,
Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Cell* target_cell());
@ -585,15 +585,15 @@ class RelocInfo {
// Wipe out a relocation to a fixed value, used for making snapshots
// reproducible.
INLINE(void WipeOut(Isolate* isolate));
INLINE(void WipeOut());
template <typename ObjectVisitor>
inline void Visit(Isolate* isolate, ObjectVisitor* v);
inline void Visit(ObjectVisitor* v);
#ifdef DEBUG
// Check whether the given code contains relocation information that
// either is position-relative or movable by the garbage collector.
static bool RequiresRelocation(Isolate* isolate, const CodeDesc& desc);
static bool RequiresRelocation(const CodeDesc& desc);
#endif
#ifdef ENABLE_DISASSEMBLER
@ -609,10 +609,8 @@ class RelocInfo {
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
void set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode);
void set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode);
void set_embedded_address(Address address, ICacheFlushMode flush_mode);
void set_embedded_size(uint32_t size, ICacheFlushMode flush_mode);
uint32_t embedded_size() const;
Address embedded_address() const;
@ -623,9 +621,10 @@ class RelocInfo {
// comment).
byte* pc_;
Mode rmode_;
intptr_t data_;
intptr_t data_ = 0;
Code* host_;
Address constant_pool_ = nullptr;
Flags flags_;
friend class RelocIterator;
};
@ -635,7 +634,6 @@ class RelocInfo {
class RelocInfoWriter BASE_EMBEDDED {
public:
RelocInfoWriter() : pos_(nullptr), last_pc_(nullptr) {}
RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc) {}
byte* pos() const { return pos_; }
byte* last_pc() const { return last_pc_; }
@ -651,10 +649,7 @@ class RelocInfoWriter BASE_EMBEDDED {
// Max size (bytes) of a written RelocInfo. Longest encoding is
// ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, data_delta.
// On ia32 and arm this is 1 + 4 + 1 + 1 + 4 = 11.
// On x64 this is 1 + 4 + 1 + 1 + 8 == 15;
// Here we use the maximum of the two.
static const int kMaxSize = 15;
static constexpr int kMaxSize = 1 + 4 + 1 + 1 + kPointerSize;
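With kPointerSize == 8 the new expression evaluates to 1 + 4 + 1 + 1 + 8 = 15, the x64 maximum the old comment quoted; with kPointerSize == 4 it yields 11, the ia32/arm figure, so the constant now adapts to the target word size rather than always reserving the 64-bit worst case.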
private:
inline uint32_t WriteLongPCJump(uint32_t pc_delta);
@ -669,7 +664,6 @@ class RelocInfoWriter BASE_EMBEDDED {
byte* pos_;
byte* last_pc_;
RelocInfo::Mode last_mode_;
DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
};
@ -733,19 +727,14 @@ class RelocIterator: public Malloced {
const byte* pos_;
const byte* end_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
bool done_ = false;
const int mode_mask_;
DISALLOW_COPY_AND_ASSIGN(RelocIterator);
};
//------------------------------------------------------------------------------
// External function
//----------------------------------------------------------------------------
class SCTableReference;
class Debug_Address;
// External references
// An ExternalReference represents a C++ address used in the generated
// code. All references to C++ functions and variables must be encapsulated in
@ -800,9 +789,7 @@ class ExternalReference BASE_EMBEDDED {
static void SetUp();
// These functions must use the isolate in a thread-safe way.
typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
Type type);
typedef void* ExternalReferenceRedirector(void* original, Type type);
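Redirector callbacks lose their Isolate* parameter in this change. A hypothetical callback matching the new signature (name and body are illustrative only):

// Identity redirection; a simulator build would return a trampoline instead.
void* MyRedirector(void* original, ExternalReference::Type type) {
  return original;
}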
ExternalReference() : address_(nullptr) {}
@ -999,6 +986,7 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference copy_typed_array_elements_to_typed_array(
Isolate* isolate);
static ExternalReference copy_typed_array_elements_slice(Isolate* isolate);
static ExternalReference page_flags(Page* page);
@ -1073,9 +1061,8 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirector*>(
isolate->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
void* answer = (redirector == nullptr)
? address
: (*redirector)(isolate, address, type);
void* answer =
(redirector == nullptr) ? address : (*redirector)(address, type);
return answer;
}


@ -6,7 +6,6 @@ littledan@chromium.org
marja@chromium.org
mstarzinger@chromium.org
neis@chromium.org
rossberg@chromium.org
verwaest@chromium.org
# COMPONENT: Blink>JavaScript>Language


@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
#define V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
#ifndef V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER_H_
#define V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER_H_
#include "src/ast/ast-traversal-visitor.h"
#include "src/base/macros.h"
@ -33,4 +33,4 @@ class AstFunctionLiteralIdReindexer final
} // namespace internal
} // namespace v8
#endif // V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
#endif // V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER_H_


@ -1,410 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ast/ast-numbering.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/compiler.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
public:
AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
Compiler::EagerInnerFunctionLiterals* eager_literals)
: zone_(zone), eager_literals_(eager_literals), suspend_count_(0) {
InitializeAstVisitor(stack_limit);
}
bool Renumber(FunctionLiteral* node);
private:
// AST node visitor interface.
#define DEFINE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
void VisitSuspend(Suspend* node);
void VisitStatementsAndDeclarations(Block* node);
void VisitStatements(ZoneList<Statement*>* statements);
void VisitDeclarations(Declaration::List* declarations);
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitLiteralProperty(LiteralProperty* property);
Zone* zone() const { return zone_; }
Zone* zone_;
Compiler::EagerInnerFunctionLiterals* eager_literals_;
int suspend_count_;
FunctionKind function_kind_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
};
void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
VisitVariableProxy(node->proxy());
}
void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
}
void AstNumberingVisitor::VisitSloppyBlockFunctionStatement(
SloppyBlockFunctionStatement* node) {
Visit(node->statement());
}
void AstNumberingVisitor::VisitContinueStatement(ContinueStatement* node) {
}
void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
}
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
}
void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
}
void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
Visit(node->block());
Visit(node->result());
}
void AstNumberingVisitor::VisitLiteral(Literal* node) {
}
void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
}
void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
}
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
Visit(node->this_var());
Visit(node->home_object());
}
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
Visit(node->this_var());
Visit(node->new_target_var());
Visit(node->this_function_var());
}
void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
Visit(node->expression());
}
void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
Visit(node->expression());
}
void AstNumberingVisitor::VisitSuspend(Suspend* node) {
node->set_suspend_id(suspend_count_);
suspend_count_++;
Visit(node->expression());
}
void AstNumberingVisitor::VisitYield(Yield* node) { VisitSuspend(node); }
void AstNumberingVisitor::VisitYieldStar(YieldStar* node) {
node->set_suspend_id(suspend_count_++);
if (IsAsyncGeneratorFunction(function_kind_)) {
node->set_await_iterator_close_suspend_id(suspend_count_++);
node->set_await_delegated_iterator_output_suspend_id(suspend_count_++);
}
Visit(node->expression());
}
void AstNumberingVisitor::VisitAwait(Await* node) { VisitSuspend(node); }
void AstNumberingVisitor::VisitThrow(Throw* node) {
Visit(node->exception());
}
void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
Visit(node->expression());
}
void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
Visit(node->expression());
}
void AstNumberingVisitor::VisitBlock(Block* node) {
VisitStatementsAndDeclarations(node);
}
void AstNumberingVisitor::VisitStatementsAndDeclarations(Block* node) {
Scope* scope = node->scope();
DCHECK(scope == nullptr || !scope->HasBeenRemoved());
if (scope) VisitDeclarations(scope->declarations());
VisitStatements(node->statements());
}
void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
VisitVariableProxy(node->proxy());
VisitFunctionLiteral(node->fun());
}
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
VisitArguments(node->arguments());
}
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
Visit(node->expression());
Visit(node->statement());
}
void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
node->set_first_suspend_id(suspend_count_);
Visit(node->body());
Visit(node->cond());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
node->set_first_suspend_id(suspend_count_);
Visit(node->cond());
Visit(node->body());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
Visit(node->try_block());
Visit(node->catch_block());
}
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
Visit(node->try_block());
Visit(node->finally_block());
}
void AstNumberingVisitor::VisitProperty(Property* node) {
Visit(node->key());
Visit(node->obj());
}
void AstNumberingVisitor::VisitResolvedProperty(ResolvedProperty* node) {
Visit(node->object());
Visit(node->property());
}
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
Visit(node->target());
Visit(node->value());
}
void AstNumberingVisitor::VisitCompoundAssignment(CompoundAssignment* node) {
VisitBinaryOperation(node->binary_operation());
VisitAssignment(node);
}
void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
Visit(node->left());
Visit(node->right());
}
void AstNumberingVisitor::VisitNaryOperation(NaryOperation* node) {
Visit(node->first());
for (size_t i = 0; i < node->subsequent_length(); ++i) {
Visit(node->subsequent(i));
}
}
void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
Visit(node->left());
Visit(node->right());
}
void AstNumberingVisitor::VisitSpread(Spread* node) {
Visit(node->expression());
}
void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
UNREACHABLE();
}
void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
Visit(node->iterable());
}
void AstNumberingVisitor::VisitGetTemplateObject(GetTemplateObject* node) {}
void AstNumberingVisitor::VisitImportCallExpression(
ImportCallExpression* node) {
Visit(node->argument());
}
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
Visit(node->enumerable()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
Visit(node->each());
Visit(node->body());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
Visit(node->assign_iterator()); // Not part of loop.
Visit(node->assign_next());
node->set_first_suspend_id(suspend_count_);
Visit(node->next_result());
Visit(node->result_done());
Visit(node->assign_each());
Visit(node->body());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
void AstNumberingVisitor::VisitConditional(Conditional* node) {
Visit(node->condition());
Visit(node->then_expression());
Visit(node->else_expression());
}
void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
Visit(node->condition());
Visit(node->then_statement());
if (node->HasElseStatement()) {
Visit(node->else_statement());
}
}
void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
Visit(node->tag());
for (CaseClause* clause : *node->cases()) {
if (!clause->is_default()) Visit(clause->label());
VisitStatements(clause->statements());
}
}
void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
if (node->init() != nullptr) Visit(node->init()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
if (node->cond() != nullptr) Visit(node->cond());
if (node->next() != nullptr) Visit(node->next());
Visit(node->body());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
if (node->static_fields_initializer() != nullptr) {
Visit(node->static_fields_initializer());
}
if (node->instance_fields_initializer_function() != nullptr) {
Visit(node->instance_fields_initializer_function());
}
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
}
void AstNumberingVisitor::VisitInitializeClassFieldsStatement(
InitializeClassFieldsStatement* node) {
for (int i = 0; i < node->fields()->length(); i++) {
VisitLiteralProperty(node->fields()->at(i));
}
}
void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
Visit(node->key());
Visit(node->value());
}
void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
}
void AstNumberingVisitor::VisitCall(Call* node) {
Visit(node->expression());
VisitArguments(node->arguments());
}
void AstNumberingVisitor::VisitCallNew(CallNew* node) {
Visit(node->expression());
VisitArguments(node->arguments());
}
void AstNumberingVisitor::VisitStatements(ZoneList<Statement*>* statements) {
if (statements == nullptr) return;
for (int i = 0; i < statements->length(); i++) {
Visit(statements->at(i));
if (statements->at(i)->IsJump()) break;
}
}
void AstNumberingVisitor::VisitDeclarations(Declaration::List* decls) {
for (Declaration* decl : *decls) Visit(decl);
}
void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
Visit(arguments->at(i));
}
}
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
if (node->ShouldEagerCompile()) {
if (eager_literals_) {
eager_literals_->Add(new (zone())
ThreadedListZoneEntry<FunctionLiteral*>(node));
}
// If the function literal is being eagerly compiled, recurse into the
// declarations and body of the function literal.
if (!AstNumbering::Renumber(stack_limit_, zone_, node, eager_literals_)) {
SetStackOverflow();
return;
}
}
}
void AstNumberingVisitor::VisitRewritableExpression(
RewritableExpression* node) {
Visit(node->expression());
}
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DeclarationScope* scope = node->scope();
DCHECK(!scope->HasBeenRemoved());
function_kind_ = node->kind();
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
node->set_suspend_count(suspend_count_);
return !HasStackOverflow();
}
bool AstNumbering::Renumber(
uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
Compiler::EagerInnerFunctionLiterals* eager_literals) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
return visitor.Renumber(function);
}
} // namespace internal
} // namespace v8


@ -1,55 +0,0 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_AST_NUMBERING_H_
#define V8_AST_AST_NUMBERING_H_
#include <stdint.h>
namespace v8 {
namespace internal {
// Forward declarations.
class FunctionLiteral;
class Isolate;
class Zone;
template <typename T>
class ThreadedList;
template <typename T>
class ThreadedListZoneEntry;
template <typename T>
class ZoneVector;
namespace AstNumbering {
// Assign bailout IDs, and generator suspend IDs to an AST node tree; perform
// catch prediction for TryStatements. If |eager_literals| is non-null, adds any
// eager inner literal functions into it.
bool Renumber(
uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
}
// Some details on suspend IDs
// -------------------------
//
// In order to assist Ignition in generating bytecode for a generator function,
// we assign a unique number (the suspend ID) to each Suspend node in its AST.
// We also annotate loops with the number of suspends they contain
// (loop.suspend_count) and the smallest ID of those (loop.first_suspend_id),
// and we annotate the function itself with the number of suspends it contains
// (function.suspend_count).
//
// The way in which we choose the IDs is simply by enumerating the Suspend
// nodes.
// Ignition relies on the following properties:
// - For each loop l and each suspend s of l:
// l.first_suspend_id <=
// s.suspend_id < l.first_suspend_id + l.suspend_count
// - For the generator function f itself and each suspend s of f:
// 0 <= s.suspend_id < f.suspend_count
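As a concrete illustration of these properties (hypothetical generator, IDs assigned in visit order):

// function* f() {      // f.suspend_count == 3
//   yield a;           // suspend_id 0
//   while (cond) {     // loop.first_suspend_id == 1, loop.suspend_count == 2
//     yield b;         // suspend_id 1
//     yield c;         // suspend_id 2
//   }
// }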
} // namespace internal
} // namespace v8
#endif // V8_AST_AST_NUMBERING_H_


@ -311,7 +311,7 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
: LiteralProperty(key, value, is_computed_name),
kind_(kind),
is_static_(is_static),
computed_name_var_(nullptr) {}
private_or_computed_name_var_(nullptr) {}
bool ObjectLiteral::Property::IsCompileTimeValue() const {
return kind_ == CONSTANT ||
@ -683,8 +683,8 @@ Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
}
}
}
return isolate->factory()->NewTemplateObjectDescription(
this->hash(), raw_strings, cooked_strings);
return isolate->factory()->NewTemplateObjectDescription(raw_strings,
cooked_strings);
}
static bool IsCommutativeOperationWithSmiLiteral(Token::Value op) {

106
deps/v8/src/ast/ast.h vendored

@ -437,21 +437,12 @@ class IterationStatement : public BreakableStatement {
ZoneList<const AstRawString*>* labels() const { return labels_; }
int suspend_count() const { return suspend_count_; }
int first_suspend_id() const { return first_suspend_id_; }
void set_suspend_count(int suspend_count) { suspend_count_ = suspend_count; }
void set_first_suspend_id(int first_suspend_id) {
first_suspend_id_ = first_suspend_id;
}
protected:
IterationStatement(ZoneList<const AstRawString*>* labels, int pos,
NodeType type)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, type),
labels_(labels),
body_(nullptr),
suspend_count_(0),
first_suspend_id_(0) {}
body_(nullptr) {}
void Initialize(Statement* body) { body_ = body; }
static const uint8_t kNextBitFieldIndex =
@ -460,8 +451,6 @@ class IterationStatement : public BreakableStatement {
private:
ZoneList<const AstRawString*>* labels_;
Statement* body_;
int suspend_count_;
int first_suspend_id_;
};
@ -1486,6 +1475,7 @@ class ArrayLiteral final : public AggregateLiteral {
ZoneList<Expression*>* values_;
};
enum class HoleCheckMode { kRequired, kElided };
class VariableProxy final : public Expression {
public:
@ -1540,6 +1530,11 @@ class VariableProxy final : public Expression {
HoleCheckModeField::update(bit_field_, HoleCheckMode::kRequired);
}
bool is_private_field() const { return IsPrivateField::decode(bit_field_); }
void set_is_private_field() {
bit_field_ = IsPrivateField::update(bit_field_, true);
}
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@ -1559,7 +1554,8 @@ class VariableProxy final : public Expression {
bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false) |
HoleCheckModeField::encode(HoleCheckMode::kElided);
HoleCheckModeField::encode(HoleCheckMode::kElided) |
IsPrivateField::encode(false);
}
explicit VariableProxy(const VariableProxy* copy_from);
@ -1571,6 +1567,7 @@ class VariableProxy final : public Expression {
class IsNewTargetField : public BitField<bool, IsResolvedField::kNext, 1> {};
class HoleCheckModeField
: public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
class IsPrivateField : public BitField<bool, HoleCheckModeField::kNext, 1> {};
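The IsPrivateField bit above follows V8's BitField packing idiom. A self-contained sketch of the same encode/update/decode pattern (simplified stand-in types, not the real template):

#include <cstdint>

template <int shift>
struct BoolField {  // minimal stand-in for BitField<bool, shift, 1>
  static uint32_t encode(bool v) { return static_cast<uint32_t>(v) << shift; }
  static uint32_t update(uint32_t prev, bool v) {
    return (prev & ~(1u << shift)) | encode(v);
  }
  static bool decode(uint32_t bits) { return (bits >> shift) & 1; }
};

using IsAssigned = BoolField<0>;
using IsPrivate = BoolField<1>;

int main() {
  uint32_t bits = IsAssigned::encode(false) | IsPrivate::encode(false);
  bits = IsPrivate::update(bits, true);    // set_is_private_field() analogue
  return IsPrivate::decode(bits) ? 0 : 1;  // exits 0: the bit round-trips
}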
union {
const AstRawString* raw_name_; // if !is_resolved_
@ -1590,7 +1587,6 @@ enum LhsKind {
KEYED_SUPER_PROPERTY
};
class Property final : public Expression {
public:
bool IsValidReferenceExpression() const { return true; }
@ -2096,11 +2092,6 @@ class Suspend : public Expression {
return OnAbruptResumeField::decode(bit_field_);
}
int suspend_id() const { return suspend_id_; }
void set_suspend_id(int id) { suspend_id_ = id; }
inline bool IsInitialYield() const { return suspend_id_ == 0 && IsYield(); }
private:
friend class AstNodeFactory;
friend class Yield;
@ -2109,11 +2100,10 @@ class Suspend : public Expression {
Suspend(NodeType node_type, Expression* expression, int pos,
OnAbruptResume on_abrupt_resume)
: Expression(pos, node_type), suspend_id_(-1), expression_(expression) {
: Expression(pos, node_type), expression_(expression) {
bit_field_ |= OnAbruptResumeField::encode(on_abrupt_resume);
}
int suspend_id_;
Expression* expression_;
class OnAbruptResumeField
@ -2128,47 +2118,11 @@ class Yield final : public Suspend {
};
class YieldStar final : public Suspend {
public:
// In addition to the normal suspend for yield*, a yield* in an async
// generator has 2 additional suspends:
// - One for awaiting the iterator result of closing the generator when
// resumed with a "throw" completion, and a throw method is not present
// on the delegated iterator (await_iterator_close_suspend_id)
// - One for awaiting the iterator result yielded by the delegated iterator
// (await_delegated_iterator_output_suspend_id)
int await_iterator_close_suspend_id() const {
return await_iterator_close_suspend_id_;
}
void set_await_iterator_close_suspend_id(int id) {
await_iterator_close_suspend_id_ = id;
}
int await_delegated_iterator_output_suspend_id() const {
return await_delegated_iterator_output_suspend_id_;
}
void set_await_delegated_iterator_output_suspend_id(int id) {
await_delegated_iterator_output_suspend_id_ = id;
}
inline int suspend_count() const {
if (await_iterator_close_suspend_id_ != -1) {
DCHECK_NE(-1, await_delegated_iterator_output_suspend_id_);
return 3;
}
return 1;
}
private:
friend class AstNodeFactory;
YieldStar(Expression* expression, int pos)
: Suspend(kYieldStar, expression, pos,
Suspend::OnAbruptResume::kNoControl),
await_iterator_close_suspend_id_(-1),
await_delegated_iterator_output_suspend_id_(-1) {}
int await_iterator_close_suspend_id_;
int await_delegated_iterator_output_suspend_id_;
Suspend::OnAbruptResume::kNoControl) {}
};
class Await final : public Suspend {
@ -2407,14 +2361,29 @@ class FunctionLiteral final : public Expression {
// about a class literal's properties from the parser to the code generator.
class ClassLiteralProperty final : public LiteralProperty {
public:
enum Kind : uint8_t { METHOD, GETTER, SETTER, FIELD };
enum Kind : uint8_t { METHOD, GETTER, SETTER, PUBLIC_FIELD, PRIVATE_FIELD };
Kind kind() const { return kind_; }
bool is_static() const { return is_static_; }
void set_computed_name_var(Variable* var) { computed_name_var_ = var; }
Variable* computed_name_var() const { return computed_name_var_; }
void set_computed_name_var(Variable* var) {
DCHECK_EQ(PUBLIC_FIELD, kind());
private_or_computed_name_var_ = var;
}
Variable* computed_name_var() const {
DCHECK_EQ(PUBLIC_FIELD, kind());
return private_or_computed_name_var_;
}
void set_private_field_name_var(Variable* var) {
DCHECK_EQ(PRIVATE_FIELD, kind());
private_or_computed_name_var_ = var;
}
Variable* private_field_name_var() const {
DCHECK_EQ(PRIVATE_FIELD, kind());
return private_or_computed_name_var_;
}
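Note that the PUBLIC_FIELD and PRIVATE_FIELD accessors share the single private_or_computed_name_var_ slot; the DCHECKs make each getter and setter valid only for its matching kind, so a property never carries both a computed-name variable and a private-name variable at once.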
private:
friend class AstNodeFactory;
@ -2424,7 +2393,7 @@ class ClassLiteralProperty final : public LiteralProperty {
Kind kind_;
bool is_static_;
Variable* computed_name_var_;
Variable* private_or_computed_name_var_;
};
class InitializeClassFieldsStatement final : public Statement {
@ -2665,7 +2634,6 @@ class GetTemplateObject final : public Expression {
const ZoneList<const AstRawString*>* raw_strings() const {
return raw_strings_;
}
int hash() const { return hash_; }
Handle<TemplateObjectDescription> GetOrBuildDescription(Isolate* isolate);
@ -2673,16 +2641,13 @@ class GetTemplateObject final : public Expression {
friend class AstNodeFactory;
GetTemplateObject(const ZoneList<const AstRawString*>* cooked_strings,
const ZoneList<const AstRawString*>* raw_strings, int hash,
int pos)
const ZoneList<const AstRawString*>* raw_strings, int pos)
: Expression(pos, kGetTemplateObject),
cooked_strings_(cooked_strings),
raw_strings_(raw_strings),
hash_(hash) {}
raw_strings_(raw_strings) {}
const ZoneList<const AstRawString*>* cooked_strings_;
const ZoneList<const AstRawString*>* raw_strings_;
int hash_;
};
// ----------------------------------------------------------------------------
@ -3257,9 +3222,8 @@ class AstNodeFactory final BASE_EMBEDDED {
GetTemplateObject* NewGetTemplateObject(
const ZoneList<const AstRawString*>* cooked_strings,
const ZoneList<const AstRawString*>* raw_strings, int hash, int pos) {
return new (zone_)
GetTemplateObject(cooked_strings, raw_strings, hash, pos);
const ZoneList<const AstRawString*>* raw_strings, int pos) {
return new (zone_) GetTemplateObject(cooked_strings, raw_strings, pos);
}
ImportCallExpression* NewImportCallExpression(Expression* args, int pos) {


@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_COMPILE_TIME_VALUE
#define V8_AST_COMPILE_TIME_VALUE
#ifndef V8_AST_COMPILE_TIME_VALUE_H_
#define V8_AST_COMPILE_TIME_VALUE_H_
#include "src/allocation.h"
#include "src/globals.h"
@ -43,4 +43,4 @@ class CompileTimeValue : public AllStatic {
} // namespace internal
} // namespace v8
#endif // V8_AST_COMPILE_TIME_VALUE
#endif // V8_AST_COMPILE_TIME_VALUE_H_
