deps: update V8 to 6.5.254.31
PR-URL: https://github.com/nodejs/node/pull/18453
Reviewed-By: James M Snell <jasnell@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Yang Guo <yangguo@chromium.org>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Parent: 4e86f9b5ab
Commit: 88786fecff
deps/v8/.gitignore (vendored, 4 lines changed)

@@ -50,9 +50,6 @@
 /test/fuzzer/wasm_corpus
 /test/fuzzer/wasm_corpus.tar.gz
 /test/mozilla/data
-/test/promises-aplus/promises-tests
-/test/promises-aplus/promises-tests.tar.gz
-/test/promises-aplus/sinon
 /test/test262/data
 /test/test262/data.tar
 /test/test262/harness
@@ -94,6 +91,7 @@ TAGS
 bsuite
+compile_commands.json
 d8
 !/test/mjsunit/d8
 d8_g
 gccauses
 gcsuspects
deps/v8/AUTHORS (vendored, 5 lines changed)

@@ -96,6 +96,7 @@ Luis Reis <luis.m.reis@gmail.com>
 Luke Zarko <lukezarko@gmail.com>
 Maciej Małecki <me@mmalecki.com>
 Marcin Cieślak <saper@marcincieslak.com>
 Marcin Wiącek <marcin@mwiacek.com>
 Mateusz Czeladka <mateusz.szczap@gmail.com>
 Mathias Bynens <mathias@qiwi.be>
 Matt Hanselman <mjhanselman@gmail.com>
@@ -106,6 +107,7 @@ Michael Smith <mike@w3.org>
 Michaël Zasso <mic.besace@gmail.com>
 Mike Gilbert <floppymaster@gmail.com>
 Mike Pennisi <mike@mikepennisi.com>
 Mikhail Gusarov <dottedmag@dottedmag.net>
 Milton Chiang <milton.chiang@mediatek.com>
 Myeong-bo Shim <m0609.shim@samsung.com>
 Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
@@ -118,6 +120,7 @@ Peter Rybin <peter.rybin@gmail.com>
 Peter Varga <pvarga@inf.u-szeged.hu>
 Peter Wong <peter.wm.wong@gmail.com>
 Paul Lind <plind44@gmail.com>
 Qingyan Li <qingyan.liqy@alibaba-inc.com>
 Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
 Rafal Krypa <rafal@krypa.net>
 Refael Ackermann <refack@gmail.com>
@@ -133,6 +136,8 @@ Sanjoy Das <sanjoy@playingwithpointers.com>
 Seo Sanghyeon <sanxiyn@gmail.com>
 Stefan Penner <stefan.penner@gmail.com>
 Sylvestre Ledru <sledru@mozilla.com>
 Taketoshi Aono <brn@b6n.ch>
 Tiancheng "Timothy" Gu <timothygu99@gmail.com>
 Tobias Burnus <burnus@net-b.de>
 Victor Costan <costan@gmail.com>
 Vlad Burlik <vladbph@gmail.com>
deps/v8/BUILD.gn (vendored, 75 lines changed)

@@ -89,6 +89,9 @@ declare_args() {
   # Sets -dV8_CONCURRENT_MARKING
   v8_enable_concurrent_marking = true

+  # Enables various testing features.
+  v8_enable_test_features = ""
+
   # Build the snapshot with unwinding information for perf.
   # Sets -dV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
   v8_perf_prof_unwinding_info = false
@@ -133,8 +136,6 @@ declare_args() {
   # while rolling in a new version of V8.
   v8_check_microtasks_scopes_consistency = ""

-  v8_monolithic = false
-
   # Enable mitigations for executing untrusted code.
   v8_untrusted_code_mitigations = true
 }
@@ -152,6 +153,9 @@ if (v8_enable_disassembler == "") {
 if (v8_enable_trace_maps == "") {
   v8_enable_trace_maps = is_debug
 }
+if (v8_enable_test_features == "") {
+  v8_enable_test_features = is_debug || dcheck_always_on
+}
 if (v8_enable_v8_checks == "") {
   v8_enable_v8_checks = is_debug
 }
@@ -278,6 +282,10 @@ config("features") {
   if (v8_enable_trace_feedback_updates) {
     defines += [ "V8_TRACE_FEEDBACK_UPDATES" ]
   }
+  if (v8_enable_test_features) {
+    defines += [ "V8_ENABLE_ALLOCATION_TIMEOUT" ]
+    defines += [ "V8_ENABLE_FORCE_SLOW_PATH" ]
+  }
   if (v8_enable_v8_checks) {
     defines += [ "V8_ENABLE_CHECKS" ]
   }
@@ -511,6 +519,12 @@ config("toolchain") {
       # TODO(hans): Remove once http://crbug.com/428099 is resolved.
      "-Winconsistent-missing-override",
     ]
+
+    if (v8_current_cpu != "mips" && v8_current_cpu != "mipsel") {
+      # We exclude MIPS because the IsMipsArchVariant macro causes trouble.
+      cflags += [ "-Wunreachable-code" ]
+    }

     if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
         v8_current_cpu == "mips64el") {
       cflags += [ "-Wshorten-64-to-32" ]
@@ -575,12 +589,10 @@ action("js2c") {
     "src/js/macros.py",
     "src/messages.h",
     "src/js/prologue.js",
     "src/js/v8natives.js",
     "src/js/array.js",
     "src/js/typedarray.js",
     "src/js/messages.js",
     "src/js/spread.js",
     "src/js/proxy.js",
     "src/debug/mirrors.js",
     "src/debug/debug.js",
     "src/debug/liveedit.js",
@@ -755,6 +767,10 @@ action("postmortem-metadata") {
     "src/objects-inl.h",
+    "src/objects/code-inl.h",
+    "src/objects/code.h",
+    "src/objects/data-handler.h",
+    "src/objects/data-handler-inl.h",
     "src/objects/fixed-array-inl.h",
     "src/objects/fixed-array.h",
     "src/objects/js-array-inl.h",
     "src/objects/js-array.h",
     "src/objects/js-regexp-inl.h",
@@ -1680,6 +1696,10 @@ v8_source_set("v8_base") {
     "src/heap/spaces.h",
     "src/heap/store-buffer.cc",
     "src/heap/store-buffer.h",
+    "src/heap/stress-marking-observer.cc",
+    "src/heap/stress-marking-observer.h",
+    "src/heap/stress-scavenge-observer.cc",
+    "src/heap/stress-scavenge-observer.h",
     "src/heap/sweeper.cc",
     "src/heap/sweeper.h",
     "src/heap/worklist.h",
@@ -1803,6 +1823,8 @@ v8_source_set("v8_base") {
     "src/objects/debug-objects.h",
     "src/objects/descriptor-array.h",
     "src/objects/dictionary.h",
     "src/objects/fixed-array-inl.h",
     "src/objects/fixed-array.h",
     "src/objects/frame-array-inl.h",
     "src/objects/frame-array.h",
     "src/objects/hash-table-inl.h",
@@ -1811,6 +1833,8 @@ v8_source_set("v8_base") {
     "src/objects/intl-objects.h",
     "src/objects/js-array-inl.h",
     "src/objects/js-array.h",
     "src/objects/js-collection-inl.h",
     "src/objects/js-collection.h",
     "src/objects/js-regexp-inl.h",
     "src/objects/js-regexp.h",
     "src/objects/literal-objects-inl.h",
@@ -1974,6 +1998,8 @@ v8_source_set("v8_base") {
     "src/safepoint-table.h",
     "src/setup-isolate.h",
     "src/signature.h",
+    "src/simulator-base.cc",
+    "src/simulator-base.h",
     "src/simulator.h",
     "src/snapshot/builtin-deserializer-allocator.cc",
     "src/snapshot/builtin-deserializer-allocator.h",
@@ -2032,6 +2058,7 @@ v8_source_set("v8_base") {
     "src/string-stream.h",
     "src/strtod.cc",
     "src/strtod.h",
+    "src/third_party/utf8-decoder/utf8-decoder.h",
     "src/tracing/trace-event.cc",
     "src/tracing/trace-event.h",
     "src/tracing/traced-value.cc",
@@ -2066,6 +2093,8 @@ v8_source_set("v8_base") {
     "src/v8threads.h",
     "src/value-serializer.cc",
     "src/value-serializer.h",
+    "src/vector-slot-pair.cc",
+    "src/vector-slot-pair.h",
     "src/vector.h",
     "src/version.cc",
     "src/version.h",
@@ -2073,9 +2102,11 @@ v8_source_set("v8_base") {
     "src/visitors.h",
     "src/vm-state-inl.h",
     "src/vm-state.h",
+    "src/wasm/baseline/liftoff-assembler-defs.h",
     "src/wasm/baseline/liftoff-assembler.cc",
     "src/wasm/baseline/liftoff-assembler.h",
     "src/wasm/baseline/liftoff-compiler.cc",
+    "src/wasm/baseline/liftoff-register.h",
     "src/wasm/compilation-manager.cc",
     "src/wasm/compilation-manager.h",
     "src/wasm/decoder.h",
@@ -2097,15 +2128,18 @@ v8_source_set("v8_base") {
     "src/wasm/streaming-decoder.h",
     "src/wasm/wasm-api.cc",
     "src/wasm/wasm-api.h",
     "src/wasm/wasm-code-manager.cc",
     "src/wasm/wasm-code-manager.h",
     "src/wasm/wasm-code-specialization.cc",
     "src/wasm/wasm-code-specialization.h",
     "src/wasm/wasm-code-wrapper.cc",
     "src/wasm/wasm-code-wrapper.h",
     "src/wasm/wasm-constants.h",
     "src/wasm/wasm-debug.cc",
     "src/wasm/wasm-engine.cc",
     "src/wasm/wasm-engine.h",
     "src/wasm/wasm-external-refs.cc",
     "src/wasm/wasm-external-refs.h",
     "src/wasm/wasm-heap.cc",
     "src/wasm/wasm-heap.h",
     "src/wasm/wasm-interpreter.cc",
     "src/wasm/wasm-interpreter.h",
     "src/wasm/wasm-js.cc",
@@ -2184,7 +2218,6 @@ v8_source_set("v8_base") {
     "src/ia32/sse-instr.h",
     "src/regexp/ia32/regexp-macro-assembler-ia32.cc",
     "src/regexp/ia32/regexp-macro-assembler-ia32.h",
-    "src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h",
     "src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
   ]
 } else if (v8_current_cpu == "x64") {
@@ -2199,7 +2232,6 @@ v8_source_set("v8_base") {
     "src/regexp/x64/regexp-macro-assembler-x64.cc",
     "src/regexp/x64/regexp-macro-assembler-x64.h",
     "src/third_party/valgrind/valgrind.h",
-    "src/wasm/baseline/x64/liftoff-assembler-x64-defs.h",
     "src/wasm/baseline/x64/liftoff-assembler-x64.h",
     "src/x64/assembler-x64-inl.h",
     "src/x64/assembler-x64.cc",
@@ -2253,7 +2285,6 @@ v8_source_set("v8_base") {
     "src/debug/arm/debug-arm.cc",
     "src/regexp/arm/regexp-macro-assembler-arm.cc",
     "src/regexp/arm/regexp-macro-assembler-arm.h",
-    "src/wasm/baseline/arm/liftoff-assembler-arm-defs.h",
     "src/wasm/baseline/arm/liftoff-assembler-arm.h",
   ]
 } else if (v8_current_cpu == "arm64") {
@@ -2299,7 +2330,6 @@ v8_source_set("v8_base") {
     "src/debug/arm64/debug-arm64.cc",
     "src/regexp/arm64/regexp-macro-assembler-arm64.cc",
     "src/regexp/arm64/regexp-macro-assembler-arm64.h",
-    "src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h",
     "src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
   ]
   if (use_jumbo_build) {
@@ -2336,7 +2366,6 @@ v8_source_set("v8_base") {
     "src/mips/simulator-mips.h",
     "src/regexp/mips/regexp-macro-assembler-mips.cc",
     "src/regexp/mips/regexp-macro-assembler-mips.h",
-    "src/wasm/baseline/mips/liftoff-assembler-mips-defs.h",
     "src/wasm/baseline/mips/liftoff-assembler-mips.h",
   ]
 } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
@@ -2366,7 +2395,6 @@ v8_source_set("v8_base") {
     "src/mips64/simulator-mips64.h",
     "src/regexp/mips64/regexp-macro-assembler-mips64.cc",
     "src/regexp/mips64/regexp-macro-assembler-mips64.h",
-    "src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h",
     "src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
   ]
 } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
@@ -2396,7 +2424,6 @@ v8_source_set("v8_base") {
     "src/ppc/simulator-ppc.h",
     "src/regexp/ppc/regexp-macro-assembler-ppc.cc",
     "src/regexp/ppc/regexp-macro-assembler-ppc.h",
-    "src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h",
     "src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
   ]
 } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
@@ -2426,7 +2453,6 @@ v8_source_set("v8_base") {
     "src/s390/macro-assembler-s390.h",
     "src/s390/simulator-s390.cc",
     "src/s390/simulator-s390.h",
-    "src/wasm/baseline/s390/liftoff-assembler-s390-defs.h",
     "src/wasm/baseline/s390/liftoff-assembler-s390.h",
   ]
 }
@@ -2506,6 +2532,8 @@ v8_component("v8_libbase") {
     "src/base/once.cc",
     "src/base/once.h",
     "src/base/optional.h",
+    "src/base/page-allocator.cc",
+    "src/base/page-allocator.h",
     "src/base/platform/condition-variable.cc",
     "src/base/platform/condition-variable.h",
     "src/base/platform/elapsed-timer.h",
@@ -2812,6 +2840,7 @@ group("v8_fuzzers") {
   testonly = true
   deps = [
     ":v8_simple_json_fuzzer",
+    ":v8_simple_multi_return_fuzzer",
     ":v8_simple_parser_fuzzer",
     ":v8_simple_regexp_fuzzer",
     ":v8_simple_wasm_async_fuzzer",
@@ -3062,6 +3091,24 @@ v8_source_set("json_fuzzer") {
 v8_fuzzer("json_fuzzer") {
 }

+v8_source_set("multi_return_fuzzer") {
+  sources = [
+    "test/fuzzer/multi-return.cc",
+  ]
+
+  deps = [
+    ":fuzzer_support",
+  ]
+
+  configs = [
+    ":external_config",
+    ":internal_config_base",
+  ]
+}
+
+v8_fuzzer("multi_return_fuzzer") {
+}
+
 v8_source_set("parser_fuzzer") {
   sources = [
     "test/fuzzer/parser.cc",
deps/v8/ChangeLog (vendored, 1300 lines changed): diff suppressed because it is too large.
deps/v8/DEPS (vendored, 47 lines changed)

@@ -5,27 +5,32 @@
 vars = {
   'checkout_instrumented_libraries': False,
   'chromium_url': 'https://chromium.googlesource.com',
+  'build_for_node': False,
 }

 deps = {
   'v8/build':
-    Var('chromium_url') + '/chromium/src/build.git' + '@' + '9338ce52d0b9bcef34c38285fbd5023b62739fac',
+    Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b3a78cd03a95c30ff10f863f736249eb04f0f34d',
   'v8/tools/gyp':
     Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
   'v8/third_party/icu':
-    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '741688ebf328da9adc52505248bf4e2ef868722c',
+    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'c8ca2962b46670ec89071ffd1291688983cd319c',
   'v8/third_party/instrumented_libraries':
-    Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '28417458ac4dc79f68915079d0f283f682504cc0',
+    Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'b7578b4132cf73ca3265e2ee0b7bd0a422a54ebf',
   'v8/buildtools':
-    Var('chromium_url') + '/chromium/buildtools.git' + '@' + '505de88083136eefd056e5ee4ca0f01fe9b33de8',
+    Var('chromium_url') + '/chromium/buildtools.git' + '@' + '6fe4a3251488f7af86d64fc25cf442e817cf6133',
   'v8/base/trace_event/common':
     Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '0e9a47d74970bee1bbfc063c47215406f8918699',
+  'v8/third_party/android_ndk': {
+    'url': Var('chromium_url') + '/android_ndk.git' + '@' + 'e951c37287c7d8cd915bf8d4149fd4a06d808b55',
+    'condition': 'checkout_android',
+  },
   'v8/third_party/android_tools': {
-    'url': Var('chromium_url') + '/android_tools.git' + '@' + 'a2e9bc7c1b41d983577907df51d339fb1e0fd02f',
+    'url': Var('chromium_url') + '/android_tools.git' + '@' + 'c78b25872734e0038ae2a333edc645cd96bc232d',
     'condition': 'checkout_android',
   },
   'v8/third_party/catapult': {
-    'url': Var('chromium_url') + '/catapult.git' + '@' + '11d7efb857ae77eff1cea4640e3f3d9ac49cba0a',
+    'url': Var('chromium_url') + '/catapult.git' + '@' + 'b4826a52853c9c2778d496f6c6fa853f777f94df',
     'condition': 'checkout_android',
   },
   'v8/third_party/colorama/src': {
@@ -37,7 +42,7 @@ deps = {
   'v8/third_party/markupsafe':
     Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
   'v8/tools/swarming_client':
-    Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '4bd9152f8a975d57c972c071dfb4ddf668e02200',
+    Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '88229872dd17e71658fe96763feaa77915d8cbd6',
   'v8/testing/gtest':
     Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '6f8a66431cb592dad629028a50b3dd418a408c87',
   'v8/testing/gmock':
@@ -47,15 +52,15 @@ deps = {
   'v8/test/mozilla/data':
     Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
   'v8/test/test262/data':
-    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '5d4c667b271a9b39d0de73aef5ffe6879c6f8811',
+    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '8311965251953d4745aeb68c98fb71fab2eac1d0',
   'v8/test/test262/harness':
     Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
   'v8/tools/clang':
-    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '8688d267571de76a56746324dcc249bf4232b85a',
+    Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '27088876ff821e8a1518383576a43662a3255d56',
   'v8/tools/luci-go':
-    Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '45a8a51fda92e123619a69e7644d9c64a320b0c1',
+    Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + 'd882048313f6f51df29856406fa03b620c1d0205',
   'v8/test/wasm-js':
-    Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'a7e226a92e660a3d5413cfea4269824f513259d2',
+    Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'a25083ac7076b05e3f304ec9e093ef1b1ee09422',
 }

 recursedeps = [
@@ -93,7 +98,7 @@ hooks = [
   {
     'name': 'clang_format_win',
     'pattern': '.',
-    'condition': 'host_os == "win"',
+    'condition': 'host_os == "win" and build_for_node != True',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--platform=win32',
@@ -105,7 +110,7 @@ hooks = [
   {
     'name': 'clang_format_mac',
     'pattern': '.',
-    'condition': 'host_os == "mac"',
+    'condition': 'host_os == "mac" and build_for_node != True',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--platform=darwin',
@@ -117,7 +122,7 @@ hooks = [
   {
     'name': 'clang_format_linux',
     'pattern': '.',
-    'condition': 'host_os == "linux"',
+    'condition': 'host_os == "linux" and build_for_node != True',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--platform=linux*',
@@ -129,6 +134,7 @@ hooks = [
   {
     'name': 'gcmole',
     'pattern': '.',
+    'condition': 'build_for_node != True',
     # TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
     'action': [
       'python',
@@ -138,6 +144,7 @@ hooks = [
   {
     'name': 'jsfunfuzz',
     'pattern': '.',
+    'condition': 'build_for_node != True',
     # TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
     'action': [
       'python',
@@ -148,7 +155,7 @@ hooks = [
   {
     'name': 'luci-go_win',
     'pattern': '.',
-    'condition': 'host_os == "win"',
+    'condition': 'host_os == "win" and build_for_node != True',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--platform=win32',
@@ -160,7 +167,7 @@ hooks = [
   {
     'name': 'luci-go_mac',
     'pattern': '.',
-    'condition': 'host_os == "mac"',
+    'condition': 'host_os == "mac" and build_for_node != True',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--platform=darwin',
@@ -172,7 +179,7 @@ hooks = [
   {
     'name': 'luci-go_linux',
     'pattern': '.',
-    'condition': 'host_os == "linux"',
+    'condition': 'host_os == "linux" and build_for_node != True',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--platform=linux*',
@@ -221,6 +228,7 @@ hooks = [
   {
     'name': 'wasm_spec_tests',
     'pattern': '.',
+    'condition': 'build_for_node != True',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--no_auth',
@@ -232,6 +240,7 @@ hooks = [
   {
     'name': 'closure_compiler',
     'pattern': '.',
+    'condition': 'build_for_node != True',
     'action': [ 'download_from_google_storage',
                 '--no_resume',
                 '--no_auth',
@@ -246,6 +255,7 @@ hooks = [
     # change.
     'name': 'sysroot',
     'pattern': '.',
+    'condition': 'build_for_node != True',
     'action': [
       'python',
       'v8/build/linux/sysroot_scripts/install-sysroot.py',
@@ -287,7 +297,7 @@ hooks = [
   {
     'name': 'binutils',
     'pattern': 'v8/third_party/binutils',
-    'condition': 'host_os == "linux"',
+    'condition': 'host_os == "linux" and build_for_node != True',
     'action': [
       'python',
       'v8/third_party/binutils/download.py',
@@ -313,6 +323,7 @@ hooks = [
     # A change to a .gyp, .gypi, or to GYP itself should run the generator.
     'name': 'regyp_if_needed',
     'pattern': '.',
+    'condition': 'build_for_node != True',
     'action': ['python', 'v8/gypfiles/gyp_v8', '--running-as-hook'],
   },
   # Download and initialize "vpython" VirtualEnv environment packages.
deps/v8/PRESUBMIT.py (vendored, 2 lines changed)

@@ -430,6 +430,6 @@ def PostUploadHook(cl, change, output_api):
   return output_api.EnsureCQIncludeTrybotsAreAdded(
       cl,
       [
-        'master.tryserver.v8:v8_linux_noi18n_rel_ng'
+        'luci.v8.try:v8_linux_noi18n_rel_ng'
       ],
       'Automatically added noi18n trybots to run tests on CQ.')
deps/v8/build_overrides/build.gni (vendored, 2 lines changed)

@@ -8,7 +8,7 @@ build_with_chromium = false

 # Uncomment these to specify a different NDK location and version in
 # non-Chromium builds.
-# default_android_ndk_root = "//third_party/android_tools/ndk"
+# default_android_ndk_root = "//third_party/android_ndk"
 # default_android_ndk_version = "r10e"

 # Some non-Chromium builds don't support building java targets.
deps/v8/gni/v8.gni (vendored, 5 lines changed)

@@ -45,6 +45,9 @@ declare_args() {

   # Use static libraries instead of source_sets.
   v8_static_library = false
+
+  # Enable monolithic static library for embedders.
+  v8_monolithic = false
 }

 if (v8_use_external_startup_data == "") {
@@ -97,7 +100,7 @@ if (v8_code_coverage && !is_clang) {
   ]
 }

-if (is_posix && v8_enable_backtrace) {
+if (is_posix && (v8_enable_backtrace || v8_monolithic)) {
   v8_remove_configs += [ "//build/config/gcc:symbol_visibility_hidden" ]
   v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ]
 }
deps/v8/gypfiles/all.gyp (vendored, 1 line changed)

@@ -33,6 +33,7 @@
         '../test/benchmarks/benchmarks.gyp:*',
         '../test/debugger/debugger.gyp:*',
         '../test/default.gyp:*',
+        '../test/d8_default.gyp:*',
        '../test/intl/intl.gyp:*',
         '../test/message/message.gyp:*',
         '../test/mjsunit/mjsunit.gyp:*',
deps/v8/gypfiles/standalone.gypi (vendored, 2 lines changed)

@@ -296,7 +296,7 @@
     'variables': {
       # The Android toolchain needs to use the absolute path to the NDK
       # because it is used at different levels in the GYP files.
-      'android_ndk_root%': '<(base_dir)/third_party/android_tools/ndk/',
+      'android_ndk_root%': '<(base_dir)/third_party/android_ndk/',
       'android_host_arch%': "<!(uname -m | sed -e 's/i[3456]86/x86/')",
       # Version of the NDK. Used to ensure full rebuilds on NDK rolls.
       'android_ndk_version%': 'r12b',
deps/v8/include/v8-inspector.h (vendored, 5 lines changed)

@@ -149,8 +149,9 @@ class V8_EXPORT V8InspectorSession {

   // Remote objects.
   virtual std::unique_ptr<protocol::Runtime::API::RemoteObject> wrapObject(
-      v8::Local<v8::Context>, v8::Local<v8::Value>,
-      const StringView& groupName) = 0;
+      v8::Local<v8::Context>, v8::Local<v8::Value>, const StringView& groupName,
+      bool generatePreview) = 0;

   virtual bool unwrapObject(std::unique_ptr<StringBuffer>* error,
                             const StringView& objectId, v8::Local<v8::Value>*,
                             v8::Local<v8::Context>*,
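The signature change to wrapObject() above is source-breaking for embedders such as Node's inspector agent: every caller now passes an explicit generatePreview flag. A minimal sketch of an adjusted call site (WrapForConsole is a hypothetical helper; the session, context, and value are assumed to come from the embedder):

```cpp
#include <memory>
#include "v8-inspector.h"

// Hypothetical embedder helper showing the extra argument. Before this
// update the call was session->wrapObject(context, value, groupName).
std::unique_ptr<v8_inspector::protocol::Runtime::API::RemoteObject>
WrapForConsole(v8_inspector::V8InspectorSession* session,
               v8::Local<v8::Context> context, v8::Local<v8::Value> value) {
  return session->wrapObject(context, value,
                             v8_inspector::StringView(),  // empty group name
                             /* generatePreview */ false);
}
```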
deps/v8/include/v8-platform.h (vendored, 92 lines changed)

@@ -166,6 +166,74 @@ class TracingController {
   virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
 };

+/**
+ * A V8 memory page allocator.
+ *
+ * Can be implemented by an embedder to manage large host OS allocations.
+ */
+class PageAllocator {
+ public:
+  virtual ~PageAllocator() = default;
+
+  /**
+   * Gets the page granularity for AllocatePages and FreePages. Addresses and
+   * lengths for those calls should be multiples of AllocatePageSize().
+   */
+  virtual size_t AllocatePageSize() = 0;
+
+  /**
+   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
+   * and lengths for those calls should be multiples of CommitPageSize().
+   */
+  virtual size_t CommitPageSize() = 0;
+
+  /**
+   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
+   * sequences of random mmap addresses.
+   */
+  virtual void SetRandomMmapSeed(int64_t seed) = 0;
+
+  /**
+   * Returns a randomized address, suitable for memory allocation under ASLR.
+   * The address will be aligned to AllocatePageSize.
+   */
+  virtual void* GetRandomMmapAddr() = 0;
+
+  /**
+   * Memory permissions.
+   */
+  enum Permission {
+    kNoAccess,
+    kReadWrite,
+    // TODO(hpayer): Remove this flag. Memory should never be rwx.
+    kReadWriteExecute,
+    kReadExecute
+  };
+
+  /**
+   * Allocates memory in range with the given alignment and permission.
+   */
+  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
+                              Permission permissions) = 0;
+
+  /**
+   * Frees memory in a range that was allocated by a call to AllocatePages.
+   */
+  virtual bool FreePages(void* address, size_t length) = 0;
+
+  /**
+   * Releases memory in a range that was allocated by a call to AllocatePages.
+   */
+  virtual bool ReleasePages(void* address, size_t length,
+                            size_t new_length) = 0;
+
+  /**
+   * Sets permissions on pages in an allocated range.
+   */
+  virtual bool SetPermissions(void* address, size_t length,
+                              Permission permissions) = 0;
+};

 /**
  * V8 Platform abstraction layer.
  *
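PageAllocator is the embedder hook this version introduces for managing V8's large OS allocations. The sketch below is one illustrative way an embedder might implement it on POSIX with mmap/mprotect; it is not part of the patch, it ignores alignment requests stronger than the OS page size (a real implementation must honor them), and it is optional anyway, since V8 falls back to its internal v8::base::PageAllocator when the platform returns nullptr (see deps/v8/src/allocation.cc below).

```cpp
#include <sys/mman.h>
#include <unistd.h>
#include "v8-platform.h"

// Illustrative POSIX-only PageAllocator; the class name is an assumption.
class PosixPageAllocator final : public v8::PageAllocator {
 public:
  size_t AllocatePageSize() override {
    return static_cast<size_t>(sysconf(_SC_PAGESIZE));
  }
  size_t CommitPageSize() override {
    return static_cast<size_t>(sysconf(_SC_PAGESIZE));
  }
  void SetRandomMmapSeed(int64_t seed) override {}  // no address randomization
  void* GetRandomMmapAddr() override { return nullptr; }  // let mmap choose

  void* AllocatePages(void* address, size_t length, size_t alignment,
                      Permission permissions) override {
    // Simplification: relies on mmap's page alignment; stronger alignment
    // requests would need over-allocation and trimming.
    void* result = mmap(address, length, ToProt(permissions),
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return result == MAP_FAILED ? nullptr : result;
  }
  bool FreePages(void* address, size_t length) override {
    return munmap(address, length) == 0;
  }
  bool ReleasePages(void* address, size_t length, size_t new_length) override {
    // Unmap the tail, keeping the first new_length bytes mapped.
    return munmap(static_cast<char*>(address) + new_length,
                  length - new_length) == 0;
  }
  bool SetPermissions(void* address, size_t length,
                      Permission permissions) override {
    return mprotect(address, length, ToProt(permissions)) == 0;
  }

 private:
  static int ToProt(Permission p) {
    switch (p) {
      case kNoAccess: return PROT_NONE;
      case kReadWrite: return PROT_READ | PROT_WRITE;
      case kReadWriteExecute: return PROT_READ | PROT_WRITE | PROT_EXEC;
      case kReadExecute: return PROT_READ | PROT_EXEC;
    }
    return PROT_NONE;
  }
};
```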
@@ -186,6 +254,14 @@ class Platform {

   virtual ~Platform() = default;

+  /**
+   * Allows the embedder to manage memory page allocations.
+   */
+  virtual PageAllocator* GetPageAllocator() {
+    // TODO(bbudge) Make this abstract after all embedders implement this.
+    return nullptr;
+  }
+
   /**
    * Enables the embedder to respond in cases where V8 can't allocate large
    * blocks of memory. V8 retries the failed allocation once after calling this
@@ -193,7 +269,21 @@ class Platform {
    * error.
    * Embedder overrides of this function must NOT call back into V8.
    */
-  virtual void OnCriticalMemoryPressure() {}
+  virtual void OnCriticalMemoryPressure() {
+    // TODO(bbudge) Remove this when embedders override the following method.
+    // See crbug.com/634547.
+  }
+
+  /**
+   * Enables the embedder to respond in cases where V8 can't allocate large
+   * memory regions. The |length| parameter is the amount of memory needed.
+   * Returns true if memory is now available. Returns false if no memory could
+   * be made available. V8 will retry allocations until this method returns
+   * false.
+   *
+   * Embedder overrides of this function must NOT call back into V8.
+   */
+  virtual bool OnCriticalMemoryPressure(size_t length) { return false; }

   /**
    * Gets the number of threads that are used to execute background tasks. Is
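Together, the two hunks above define the new memory-pressure contract: when a large allocation fails, V8 calls OnCriticalMemoryPressure(size_t) and keeps retrying only while the embedder returns true. A hedged sketch of a platform wiring in both hooks (MyPlatform and TryReleaseEmbedderMemory() are hypothetical, and the remaining pure-virtual Platform methods are elided, so this fragment does not compile on its own):

```cpp
#include "v8-platform.h"

class MyPlatform : public v8::Platform {
 public:
  v8::PageAllocator* GetPageAllocator() override {
    return &page_allocator_;  // e.g. the PosixPageAllocator sketched earlier
  }
  bool OnCriticalMemoryPressure(size_t length) override {
    // Return true only if roughly |length| bytes could be released;
    // returning false tells V8 to stop retrying the allocation.
    return TryReleaseEmbedderMemory(length);
  }
  // ... the remaining pure-virtual Platform methods (task posting, time
  // sources, and so on) are omitted here for brevity.
 private:
  PosixPageAllocator page_allocator_;
};
```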
deps/v8/include/v8-version.h (vendored, 6 lines changed)

@@ -9,9 +9,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define V8_MAJOR_VERSION 6
-#define V8_MINOR_VERSION 4
-#define V8_BUILD_NUMBER 388
-#define V8_PATCH_LEVEL 46
+#define V8_MINOR_VERSION 5
+#define V8_BUILD_NUMBER 254
+#define V8_PATCH_LEVEL 31

 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
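Embedders commonly consume these macros as compile-time feature gates, so code that depends on APIs introduced by this update (such as v8::PageAllocator) can guard on the version bump:

```cpp
#include "v8-version.h"

#if V8_MAJOR_VERSION > 6 || (V8_MAJOR_VERSION == 6 && V8_MINOR_VERSION >= 5)
// Safe to use 6.5+ APIs such as v8::PageAllocator here.
#endif
```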
deps/v8/include/v8.h (vendored, 566 lines changed): diff suppressed because it is too large.
deps/v8/infra/config/PRESUBMIT.py (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
+# Copyright 2018 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Presubmit script for changes in the infrastructure configs.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details about the presubmit API built into gcl.
+"""
+
+
+def _CommonChecks(input_api, output_api):
+  """Checks common to both upload and commit."""
+  results = []
+  results.extend(
+      input_api.canned_checks.CheckChangedLUCIConfigs(input_api, output_api))
+  return results
+
+
+def CheckChangeOnUpload(input_api, output_api):
+  results = []
+  results.extend(_CommonChecks(input_api, output_api))
+  return results
+
+
+def CheckChangeOnCommit(input_api, output_api):
+  results = []
+  results.extend(_CommonChecks(input_api, output_api))
+  return results
deps/v8/infra/config/cq.cfg (vendored, 46 lines changed)

@@ -25,23 +25,12 @@ verifiers {
     name: "luci.v8.try"
     builders { name: "v8_android_arm_compile_rel" }
     builders { name: "v8_fuchsia_rel_ng" }
     builders { name: "v8_linux64_gcc_compile_dbg" }
     builders { name: "v8_linux_gcc_compile_rel" }
     builders { name: "v8_linux_shared_compile_rel" }
     builders { name: "v8_presubmit" }
     builders {
       name: "v8_win64_msvc_compile_rel"
       experiment_percentage: 20
     }
   }
   buckets {
     name: "master.tryserver.v8"
     builders { name: "v8_node_linux64_rel" }
     builders { name: "v8_linux64_asan_rel_ng" }
     builders {
       name: "v8_linux64_asan_rel_ng_triggered"
       triggered_by: "v8_linux64_asan_rel_ng"
     }
     builders { name: "v8_linux64_gcc_compile_dbg" }
     builders { name: "v8_linux64_gyp_rel_ng" }
     builders {
       name: "v8_linux64_gyp_rel_ng_triggered"
@@ -52,6 +41,10 @@ verifiers {
       name: "v8_linux64_rel_ng_triggered"
       triggered_by: "v8_linux64_rel_ng"
     }
     builders {
       name: "v8_linux64_sanitizer_coverage_rel"
       experiment_percentage: 100
     }
     builders { name: "v8_linux64_verify_csa_rel_ng" }
     builders {
       name: "v8_linux64_verify_csa_rel_ng_triggered"
@@ -67,14 +60,19 @@ verifiers {
       name: "v8_linux_arm_rel_ng_triggered"
       triggered_by: "v8_linux_arm_rel_ng"
     }
     builders {
       name: "v8_linux_blink_rel"
       experiment_percentage: 100
     }
     builders { name: "v8_linux_chromium_gn_rel" }
     builders { name: "v8_linux_dbg_ng" }
     builders {
       name: "v8_linux_dbg_ng_triggered"
       triggered_by: "v8_linux_dbg_ng"
     }
     builders { name: "v8_linux_mipsel_compile_rel" }
     builders { name: "v8_linux_gcc_compile_rel" }
     builders { name: "v8_linux_mips64el_compile_rel" }
     builders { name: "v8_linux_mipsel_compile_rel" }
     builders { name: "v8_linux_nodcheck_rel_ng" }
     builders {
       name: "v8_linux_nodcheck_rel_ng_triggered"
@@ -85,6 +83,7 @@ verifiers {
       name: "v8_linux_rel_ng_triggered"
       triggered_by: "v8_linux_rel_ng"
     }
     builders { name: "v8_linux_shared_compile_rel" }
     builders { name: "v8_linux_verify_csa_rel_ng" }
     builders {
       name: "v8_linux_verify_csa_rel_ng_triggered"
@@ -95,6 +94,12 @@ verifiers {
       name: "v8_mac_rel_ng_triggered"
       triggered_by: "v8_mac_rel_ng"
     }
     builders { name: "v8_node_linux64_rel" }
     builders { name: "v8_presubmit" }
     builders {
       name: "v8_win64_msvc_compile_rel"
       experiment_percentage: 20
     }
     builders { name: "v8_win64_rel_ng" }
     builders {
       name: "v8_win64_rel_ng_triggered"
@@ -111,21 +116,6 @@ verifiers {
       name: "v8_win_rel_ng_triggered"
       triggered_by: "v8_win_rel_ng"
     }
     builders {
       name: "v8_linux_blink_rel"
       experiment_percentage: 100
     }
     builders {
       name: "v8_linux64_sanitizer_coverage_rel"
       experiment_percentage: 100
     }
   }
   buckets {
     name: "master.tryserver.chromium.win"
     builders {
       name: "win_chromium_compile_dbg_ng"
       experiment_percentage: 100
     }
   }
 }
deps/v8/infra/mb/mb_config.pyl (vendored, 19 lines changed)

@@ -204,7 +204,8 @@
       'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
       'v8_linux_shared_compile_rel': 'gn_release_x86_shared_verify_heap',
       'v8_linux64_gcc_compile_dbg': 'gn_debug_x64_gcc',
-      'v8_linux64_rel_ng': 'gn_release_x64_trybot',
+      'v8_linux64_fyi_rel_ng': 'gn_release_x64_test_features_trybot',
+      'v8_linux64_rel_ng': 'gn_release_x64_test_features_trybot',
       'v8_linux64_verify_csa_rel_ng': 'gn_release_x64_verify_csa',
       'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
       'v8_linux64_asan_rel_ng': 'gn_release_x64_asan_minimal_symbols',
@@ -428,6 +429,8 @@
       'gn', 'release_bot', 'x64', 'minimal_symbols', 'swarming'],
     'gn_release_x64_trybot': [
       'gn', 'release_trybot', 'x64', 'swarming'],
+    'gn_release_x64_test_features_trybot': [
+      'gn', 'release_trybot', 'x64', 'swarming', 'v8_enable_test_features'],
     'gn_release_x64_tsan': [
       'gn', 'release_bot', 'x64', 'tsan', 'swarming'],
     'gn_release_x64_tsan_concurrent_marking': [
@@ -555,6 +558,7 @@
     },

     'asan': {
+      'mixins': ['v8_enable_test_features'],
       'gn_args': 'is_asan=true',
       'gyp_defines': 'clang=1 asan=1',
     },
@@ -565,12 +569,14 @@
     },

     'cfi': {
+      'mixins': ['v8_enable_test_features'],
       'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true '
                   'use_cfi_recover=false'),
       'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
     },

     'cfi_clusterfuzz': {
+      'mixins': ['v8_enable_test_features'],
       'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true '
                   'use_cfi_recover=true'),
       'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
@@ -647,6 +653,7 @@
     },

     'lsan': {
+      'mixins': ['v8_enable_test_features'],
       'gn_args': 'is_lsan=true',
       'gyp_defines': 'lsan=1',
     },
@@ -662,11 +669,13 @@
     },

     'msan': {
+      'mixins': ['v8_enable_test_features'],
       'gn_args': ('is_msan=true msan_track_origins=2 '
                   'use_prebuilt_instrumented_libraries=true'),
     },

     'msan_no_origins': {
+      'mixins': ['v8_enable_test_features'],
       'gn_args': ('is_msan=true msan_track_origins=0 '
                   'use_prebuilt_instrumented_libraries=true'),
     },
@@ -756,11 +765,13 @@
     },

     'tsan': {
+      'mixins': ['v8_enable_test_features'],
       'gn_args': 'is_tsan=true',
       'gyp_defines': 'clang=1 tsan=1',
     },

     'ubsan_vptr': {
+      'mixins': ['v8_enable_test_features'],
       # TODO(krasin): Remove is_ubsan_no_recover=true when
       # https://llvm.org/bugs/show_bug.cgi?id=25569 is fixed and just use
       # ubsan_vptr instead.
@@ -768,6 +779,7 @@
     },

     'ubsan_vptr_recover': {
+      'mixins': ['v8_enable_test_features'],
       # Ubsan vptr with recovery.
       'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=false',
     },
@@ -782,6 +794,7 @@
     },

     'v8_correctness_fuzzer': {
+      'mixins': ['v8_enable_test_features'],
       'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
     },

@@ -795,6 +808,10 @@
       'gyp_defines': 'v8_enable_slow_dchecks=1',
     },

+    'v8_enable_test_features': {
+      'gn_args': 'v8_enable_test_features=true',
+    },
+
     'v8_enable_verify_predictable': {
       'gn_args': 'v8_enable_verify_predictable=true',
       'gyp_defines': 'v8_enable_verify_predictable=1',
deps/v8/infra/testing/README.md (vendored, 36 lines changed)

@@ -1,8 +1,15 @@
 # Src-side test specifications

-The infra/testing folder in V8 contains test specifications, consumed and
-executed by the continuous infrastructure. Every master has an optional file
-named `<mastername>.pyl`. E.g. `tryserver.v8.pyl`.
+Src-side test specifications enable developers to quickly add tests running on
+specific bots on V8's continuous infrastructure (CI) or tryserver. Features to
+be tested must live behind runtime flags, which are mapped to named testing
+variants specified [here](https://chromium.googlesource.com/v8/v8/+/master/tools/testrunner/local/variants.py).
+Changes to src-side test specifications go through CQ like any other CL and
+require tests added for specific trybots to pass.
+
+The test specifications are defined in a V8-side folder called infra/testing.
+Every master has an optional file named `<mastername>.pyl`. E.g.
+`tryserver.v8.pyl`.

 The structure of each file is:
 ```
@@ -21,10 +28,10 @@ The structure of each file is:
 The `<buildername>` is a string name of the builder to execute the tests.
 `<test-spec name>` is a label defining a test specification matching the
 [infra-side](https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/slave/recipe_modules/v8/testing.py#58).
-The `<variant name>` is a testing variant as specified in
-`v8/tools/testrunner/local/variants.py`. `<number of shards>` is optional
-(default 1), but can be provided to increase the swarming shards for
-long-running tests.
+The `<variant name>` is a testing variant specified
+[here](https://chromium.googlesource.com/v8/v8/+/master/tools/testrunner/local/variants.py).
+`<number of shards>` is optional (default 1), but can be provided to increase
+the swarming shards for long-running tests.

 Example:
 ```
@@ -47,4 +54,17 @@ tryserver.v8:
 client.v8:
   V8 Linux64
   V8 Linux64 - debug
 ```
+
+Please only add tests that are expected to pass, or skip failing tests via
+status file for the selected testing variants only. If you want to add FYI
+tests (i.e. not closing the tree and not blocking CQ) you can do so for the
+following set of bots:
+
+```
+tryserver.v8:
+  v8_linux64_fyi_rel_ng_triggered
+client.v8:
+  V8 Linux64 - fyi
+  V8 Linux64 - debug - fyi
+```
deps/v8/infra/testing/client.v8.pyl (vendored, 37 lines changed)

@@ -10,4 +10,39 @@
   # 'V8 Linux64 - debug': [
   #   {'name': 'benchmarks', 'variant': 'default', 'shards': 1},
   # ],
-}
+
+  'V8 Linux - debug': [
+    {'name': 'd8testing', 'variant': 'code_serializer', 'shards': 1},
+    {'name': 'mozilla', 'variant': 'code_serializer', 'shards': 1},
+    {'name': 'test262_variants', 'variant': 'code_serializer', 'shards': 1},
+    {'name': 'benchmarks', 'variant': 'code_serializer', 'shards': 1},
+  ],
+  'V8 Linux - gc stress': [
+    {'name': 'mjsunit', 'variant': 'slow_path', 'shards': 2},
+  ],
+  'V8 Linux64': [
+    {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
+  ],
+  'V8 Linux64 - debug': [
+    {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
+    {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+  ],
+  'V8 Linux64 ASAN': [
+    {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+  ],
+  'V8 Linux64 TSAN': [
+    {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+  ],
+  'V8 Linux64 - fyi': [
+    {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 1},
+    {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 2},
+    {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1},
+    {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1},
+  ],
+  'V8 Linux64 - debug - fyi': [
+    {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
+    {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 3},
+    {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1},
+    {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1},
+  ],
+}
deps/v8/infra/testing/tryserver.v8.pyl (vendored, 22 lines changed)

@@ -7,4 +7,24 @@
   # 'v8_linux64_rel_ng_triggered': [
   #   {'name': 'benchmarks', 'variant': 'default', 'shards': 1},
   # ],
-}
+
+  'v8_linux64_fyi_rel_ng_triggered': [
+    {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2},
+    {'name': 'test262_variants', 'variant': 'infra_staging', 'shards': 2},
+    {'name': 'mjsunit', 'variant': 'stress_sampling', 'shards': 1},
+    {'name': 'webkit', 'variant': 'stress_sampling', 'shards': 1},
+  ],
+  'v8_linux64_rel_ng_triggered': [
+    {'name': 'v8testing', 'variant': 'minor_mc', 'shards': 1},
+    {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+  ],
+  'v8_linux_gc_stress_dbg': [
+    {'name': 'mjsunit', 'variant': 'slow_path', 'shards': 2},
+  ],
+  'v8_linux64_asan_rel_ng_triggered': [
+    {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+  ],
+  'v8_linux64_tsan_rel': [
+    {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1},
+  ],
+}
deps/v8/src/accessors.cc (vendored, 39 lines changed)

@@ -102,7 +102,7 @@ void Accessors::ReconfigureToDataProperty(
     const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   RuntimeCallTimerScope stats_scope(
-      isolate, &RuntimeCallStats::ReconfigureToDataProperty);
+      isolate, RuntimeCallCounterId::kReconfigureToDataProperty);
   HandleScope scope(isolate);
   Handle<Object> receiver = Utils::OpenHandle(*info.This());
   Handle<JSObject> holder =
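Every RuntimeCallTimerScope hunk in this file applies the same mechanical substitution: the scope now identifies its counter by a RuntimeCallCounterId enum value rather than by a pointer-to-member of RuntimeCallStats, decoupling call sites from the stats object's layout. Side by side (the two forms are alternatives; distinct variable names are used here only so the snippet stays well-formed):

```cpp
// Old form: pointer-to-member ties the call site to RuntimeCallStats.
RuntimeCallTimerScope timer_old(isolate, &RuntimeCallStats::ArrayLengthGetter);
// New form: a plain enum id names the counter.
RuntimeCallTimerScope timer_new(isolate,
                                RuntimeCallCounterId::kArrayLengthGetter);
```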
@@ -147,7 +147,8 @@ void Accessors::ArrayLengthGetter(
     v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
-  RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::ArrayLengthGetter);
+  RuntimeCallTimerScope timer(isolate,
+                              RuntimeCallCounterId::kArrayLengthGetter);
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
   JSArray* holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
@@ -159,7 +160,8 @@ void Accessors::ArrayLengthSetter(
     v8::Local<v8::Name> name, v8::Local<v8::Value> val,
     const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
-  RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::ArrayLengthSetter);
+  RuntimeCallTimerScope timer(isolate,
+                              RuntimeCallCounterId::kArrayLengthSetter);
   HandleScope scope(isolate);

   DCHECK(Utils::OpenHandle(*name)->SameValue(isolate->heap()->length_string()));
@@ -272,7 +274,8 @@ void Accessors::StringLengthGetter(
     v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
-  RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::StringLengthGetter);
+  RuntimeCallTimerScope timer(isolate,
+                              RuntimeCallCounterId::kStringLengthGetter);
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);

@@ -546,9 +549,8 @@ void Accessors::ScriptEvalFromScriptGetter(
   Handle<Script> script(
       Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
   Handle<Object> result = isolate->factory()->undefined_value();
-  if (!script->eval_from_shared()->IsUndefined(isolate)) {
-    Handle<SharedFunctionInfo> eval_from_shared(
-        SharedFunctionInfo::cast(script->eval_from_shared()));
+  if (script->has_eval_from_shared()) {
+    Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared());
     if (eval_from_shared->script()->IsScript()) {
       Handle<Script> eval_from_script(Script::cast(eval_from_shared->script()));
       result = Script::GetWrapper(eval_from_script);
@@ -608,9 +610,8 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
   Handle<Script> script(
       Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
   Handle<Object> result = isolate->factory()->undefined_value();
-  if (!script->eval_from_shared()->IsUndefined(isolate)) {
-    Handle<SharedFunctionInfo> shared(
-        SharedFunctionInfo::cast(script->eval_from_shared()));
+  if (script->has_eval_from_shared()) {
+    Handle<SharedFunctionInfo> shared(script->eval_from_shared());
     // Find the name of the function calling eval.
     result = Handle<Object>(shared->name(), isolate);
   }
@@ -644,7 +645,7 @@ void Accessors::FunctionPrototypeGetter(
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   RuntimeCallTimerScope timer(isolate,
-                              &RuntimeCallStats::FunctionPrototypeGetter);
+                              RuntimeCallCounterId::kFunctionPrototypeGetter);
   HandleScope scope(isolate);
   Handle<JSFunction> function =
       Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -657,7 +658,7 @@ void Accessors::FunctionPrototypeSetter(
     const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   RuntimeCallTimerScope timer(isolate,
-                              &RuntimeCallStats::FunctionPrototypeSetter);
+                              RuntimeCallCounterId::kFunctionPrototypeSetter);
   HandleScope scope(isolate);
   Handle<Object> value = Utils::OpenHandle(*val);
   Handle<JSFunction> object =
@@ -681,7 +682,8 @@ void Accessors::FunctionLengthGetter(
     v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
-  RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionLengthGetter);
+  RuntimeCallTimerScope timer(isolate,
+                              RuntimeCallCounterId::kFunctionLengthGetter);
   HandleScope scope(isolate);
   Handle<JSFunction> function =
       Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -950,16 +952,17 @@ class FrameFunctionIterator {
  private:
   MaybeHandle<JSFunction> next() {
     while (true) {
-      inlined_frame_index_--;
-      if (inlined_frame_index_ == -1) {
+      if (inlined_frame_index_ <= 0) {
         if (!frame_iterator_.done()) {
           frame_iterator_.Advance();
           frames_.clear();
+          inlined_frame_index_ = -1;
           GetFrames();
         }
         if (inlined_frame_index_ == -1) return MaybeHandle<JSFunction>();
-        inlined_frame_index_--;
       }

+      --inlined_frame_index_;
       Handle<JSFunction> next_function =
           frames_[inlined_frame_index_].AsJavaScript().function();
       // Skip functions from other origins.
@@ -1057,7 +1060,7 @@ void Accessors::BoundFunctionLengthGetter(
     v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   RuntimeCallTimerScope timer(isolate,
-                              &RuntimeCallStats::BoundFunctionLengthGetter);
+                              RuntimeCallCounterId::kBoundFunctionLengthGetter);
   HandleScope scope(isolate);
   Handle<JSBoundFunction> function =
       Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
@@ -1084,7 +1087,7 @@ void Accessors::BoundFunctionNameGetter(
     v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   RuntimeCallTimerScope timer(isolate,
-                              &RuntimeCallStats::BoundFunctionNameGetter);
+                              RuntimeCallCounterId::kBoundFunctionNameGetter);
   HandleScope scope(isolate);
   Handle<JSBoundFunction> function =
       Handle<JSBoundFunction>::cast(Utils::OpenHandle(*info.Holder()));
deps/v8/src/allocation.cc (vendored, 187 lines changed)

@@ -6,7 +6,9 @@

 #include <stdlib.h>  // For free, malloc.
 #include "src/base/bits.h"
+#include "src/base/lazy-instance.h"
 #include "src/base/logging.h"
+#include "src/base/page-allocator.h"
 #include "src/base/platform/platform.h"
 #include "src/utils.h"
 #include "src/v8.h"
@@ -38,26 +40,44 @@ void* AlignedAllocInternal(size_t size, size_t alignment) {
   return ptr;
 }

+// TODO(bbudge) Simplify this once all embedders implement a page allocator.
+struct InitializePageAllocator {
+  static void Construct(void* page_allocator_ptr_arg) {
+    auto page_allocator_ptr =
+        reinterpret_cast<v8::PageAllocator**>(page_allocator_ptr_arg);
+    v8::PageAllocator* page_allocator =
+        V8::GetCurrentPlatform()->GetPageAllocator();
+    if (page_allocator == nullptr) {
+      static v8::base::PageAllocator default_allocator;
+      page_allocator = &default_allocator;
+    }
+    *page_allocator_ptr = page_allocator;
+  }
+};
+
+static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
+    page_allocator = LAZY_INSTANCE_INITIALIZER;
+
+v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); }
+
+// We will attempt allocation this many times. After each failure, we call
+// OnCriticalMemoryPressure to try to free some memory.
+const int kAllocationTries = 2;
+
 }  // namespace

 void* Malloced::New(size_t size) {
-  void* result = malloc(size);
+  void* result = AllocWithRetry(size);
   if (result == nullptr) {
-    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
-    result = malloc(size);
-    if (result == nullptr) {
-      V8::FatalProcessOutOfMemory("Malloced operator new");
-    }
+    V8::FatalProcessOutOfMemory("Malloced operator new");
   }
   return result;
 }

 void Malloced::Delete(void* p) {
   free(p);
 }

 char* StrDup(const char* str) {
   int length = StrLength(str);
   char* result = NewArray<char>(length + 1);
@@ -66,7 +86,6 @@ char* StrDup(const char* str) {
   return result;
 }

-
 char* StrNDup(const char* str, int n) {
   int length = StrLength(str);
   if (n < length) length = n;
@@ -76,22 +95,31 @@ char* StrNDup(const char* str, int n) {
   return result;
 }

+void* AllocWithRetry(size_t size) {
+  void* result = nullptr;
+  for (int i = 0; i < kAllocationTries; ++i) {
+    result = malloc(size);
+    if (result != nullptr) break;
+    if (!OnCriticalMemoryPressure(size)) break;
+  }
+  return result;
+}
+
 void* AlignedAlloc(size_t size, size_t alignment) {
   DCHECK_LE(V8_ALIGNOF(void*), alignment);
   DCHECK(base::bits::IsPowerOfTwo(alignment));
-  void* ptr = AlignedAllocInternal(size, alignment);
-  if (ptr == nullptr) {
-    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
-    ptr = AlignedAllocInternal(size, alignment);
-    if (ptr == nullptr) {
-      V8::FatalProcessOutOfMemory("AlignedAlloc");
-    }
+  void* result = nullptr;
+  for (int i = 0; i < kAllocationTries; ++i) {
+    result = AlignedAllocInternal(size, alignment);
+    if (result != nullptr) break;
+    if (!OnCriticalMemoryPressure(size + alignment)) break;
   }
-  return ptr;
+  if (result == nullptr) {
+    V8::FatalProcessOutOfMemory("AlignedAlloc");
+  }
+  return result;
 }

 void AlignedFree(void *ptr) {
 #if V8_OS_WIN
   _aligned_free(ptr);
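The rewritten Malloced::New, AllocWithRetry, and AlignedAlloc above all share one retry shape: attempt the allocation, ask the embedder to relieve memory pressure, attempt again, and give up after kAllocationTries rounds. A condensed restatement of that control flow (RetryOnPressure is illustrative and not part of the patch; OnCriticalMemoryPressure and the two-try limit are as defined in this file):

```cpp
// Illustrative restatement of the retry loop used above; alloc stands in
// for malloc or AlignedAllocInternal.
template <typename AllocFn>
void* RetryOnPressure(AllocFn alloc, size_t request_size) {
  void* result = nullptr;
  for (int i = 0; i < 2; ++i) {  // kAllocationTries == 2
    result = alloc();
    if (result != nullptr) break;                        // success
    if (!OnCriticalMemoryPressure(request_size)) break;  // embedder gave up
  }
  return result;  // callers decide whether nullptr is fatal
}
```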
@@ -103,27 +131,88 @@ void AlignedFree(void *ptr) {
 #endif
 }

-byte* AllocateSystemPage(void* address, size_t* allocated) {
-  size_t page_size = base::OS::AllocatePageSize();
-  void* result = base::OS::Allocate(address, page_size, page_size,
-                                    base::OS::MemoryPermission::kReadWrite);
+size_t AllocatePageSize() { return GetPageAllocator()->AllocatePageSize(); }
+
+size_t CommitPageSize() { return GetPageAllocator()->CommitPageSize(); }
+
+void SetRandomMmapSeed(int64_t seed) {
+  GetPageAllocator()->SetRandomMmapSeed(seed);
+}
+
+void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }
+
+void* AllocatePages(void* address, size_t size, size_t alignment,
+                    PageAllocator::Permission access) {
+  void* result = nullptr;
+  for (int i = 0; i < kAllocationTries; ++i) {
+    result =
+        GetPageAllocator()->AllocatePages(address, size, alignment, access);
+    if (result != nullptr) break;
+    size_t request_size = size + alignment - AllocatePageSize();
+    if (!OnCriticalMemoryPressure(request_size)) break;
+  }
+#if defined(LEAK_SANITIZER)
+  if (result != nullptr) {
+    __lsan_register_root_region(result, size);
+  }
+#endif
+  return result;
+}
+
+bool FreePages(void* address, const size_t size) {
+  bool result = GetPageAllocator()->FreePages(address, size);
+#if defined(LEAK_SANITIZER)
+  if (result) {
+    __lsan_unregister_root_region(address, size);
+  }
+#endif
+  return result;
+}
+
+bool ReleasePages(void* address, size_t size, size_t new_size) {
+  DCHECK_LT(new_size, size);
+  bool result = GetPageAllocator()->ReleasePages(address, size, new_size);
+#if defined(LEAK_SANITIZER)
+  if (result) {
+    __lsan_unregister_root_region(address, size);
+    __lsan_register_root_region(address, new_size);
+  }
+#endif
+  return result;
+}
+
+bool SetPermissions(void* address, size_t size,
+                    PageAllocator::Permission access) {
+  return GetPageAllocator()->SetPermissions(address, size, access);
+}
+
+byte* AllocatePage(void* address, size_t* allocated) {
+  size_t page_size = AllocatePageSize();
+  void* result =
+      AllocatePages(address, page_size, page_size, PageAllocator::kReadWrite);
   if (result != nullptr) *allocated = page_size;
   return static_cast<byte*>(result);
 }

+bool OnCriticalMemoryPressure(size_t length) {
+  // TODO(bbudge) Rework retry logic once embedders implement the more
+  // informative overload.
+  if (!V8::GetCurrentPlatform()->OnCriticalMemoryPressure(length)) {
+    V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
+  }
+  return true;
+}
+
 VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}

 VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
     : address_(nullptr), size_(0) {
-  size_t page_size = base::OS::AllocatePageSize();
+  size_t page_size = AllocatePageSize();
   size_t alloc_size = RoundUp(size, page_size);
-  address_ = base::OS::Allocate(hint, alloc_size, alignment,
-                                base::OS::MemoryPermission::kNoAccess);
+  address_ =
+      AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess);
   if (address_ != nullptr) {
     size_ = alloc_size;
+#if defined(LEAK_SANITIZER)
+    __lsan_register_root_region(address_, size_);
+#endif
   }
 }
@ -139,9 +228,9 @@ void VirtualMemory::Reset() {
|
||||
}
|
||||
|
||||
bool VirtualMemory::SetPermissions(void* address, size_t size,
|
||||
base::OS::MemoryPermission access) {
|
||||
PageAllocator::Permission access) {
|
||||
CHECK(InVM(address, size));
|
||||
bool result = base::OS::SetPermissions(address, size, access);
|
||||
bool result = v8::internal::SetPermissions(address, size, access);
|
||||
DCHECK(result);
|
||||
USE(result);
|
||||
return result;
|
||||
@ -149,8 +238,7 @@ bool VirtualMemory::SetPermissions(void* address, size_t size,
|
||||
|
||||
size_t VirtualMemory::Release(void* free_start) {
|
||||
DCHECK(IsReserved());
|
||||
DCHECK(IsAddressAligned(static_cast<Address>(free_start),
|
||||
base::OS::CommitPageSize()));
|
||||
DCHECK(IsAddressAligned(static_cast<Address>(free_start), CommitPageSize()));
|
||||
// Notice: Order is important here. The VirtualMemory object might live
|
||||
// inside the allocated region.
|
||||
const size_t free_size = size_ - (reinterpret_cast<size_t>(free_start) -
|
||||
@ -159,11 +247,7 @@ size_t VirtualMemory::Release(void* free_start) {
|
||||
DCHECK_LT(address_, free_start);
|
||||
DCHECK_LT(free_start, reinterpret_cast<void*>(
|
||||
reinterpret_cast<size_t>(address_) + size_));
|
||||
#if defined(LEAK_SANITIZER)
|
||||
__lsan_unregister_root_region(address_, size_);
|
||||
__lsan_register_root_region(address_, size_ - free_size);
|
||||
#endif
|
||||
CHECK(base::OS::Release(free_start, free_size));
|
||||
CHECK(ReleasePages(address_, size_, size_ - free_size));
|
||||
size_ -= free_size;
|
||||
return free_size;
|
||||
}
|
||||
@ -176,10 +260,7 @@ void VirtualMemory::Free() {
|
||||
size_t size = size_;
|
||||
CHECK(InVM(address, size));
|
||||
Reset();
|
||||
#if defined(LEAK_SANITIZER)
|
||||
__lsan_unregister_root_region(address, size);
|
||||
#endif
|
||||
CHECK(base::OS::Free(address, size));
|
||||
CHECK(FreePages(address, size));
|
||||
}
|
||||
|
||||
void VirtualMemory::TakeControl(VirtualMemory* from) {
|
||||
@ -190,30 +271,22 @@ void VirtualMemory::TakeControl(VirtualMemory* from) {
|
||||
}
|
||||
|
||||
bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
|
||||
VirtualMemory first_try(size, hint);
|
||||
if (first_try.IsReserved()) {
|
||||
result->TakeControl(&first_try);
|
||||
VirtualMemory vm(size, hint);
|
||||
if (vm.IsReserved()) {
|
||||
result->TakeControl(&vm);
|
||||
return true;
|
||||
}
|
||||
|
||||
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
|
||||
VirtualMemory second_try(size, hint);
|
||||
result->TakeControl(&second_try);
|
||||
return result->IsReserved();
|
||||
return false;
|
||||
}
|
||||
|
||||
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
|
||||
VirtualMemory* result) {
|
||||
VirtualMemory first_try(size, hint, alignment);
|
||||
if (first_try.IsReserved()) {
|
||||
result->TakeControl(&first_try);
|
||||
VirtualMemory vm(size, hint, alignment);
|
||||
if (vm.IsReserved()) {
|
||||
result->TakeControl(&vm);
|
||||
return true;
|
||||
}
|
||||
|
||||
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
|
||||
VirtualMemory second_try(size, hint, alignment);
|
||||
result->TakeControl(&second_try);
|
||||
return result->IsReserved();
|
||||
return false;
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
|
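The retry loop introduced above is the core of the new allocator plumbing: each failed attempt calls OnCriticalMemoryPressure() so the embedder can release reserved regions before the next try, and the loop gives up once the embedder reports it cannot free anything. A minimal self-contained sketch of the same pattern, with TryAllocate and NotifyPressure as hypothetical stand-ins (not real V8 API) for GetPageAllocator()->AllocatePages() and the platform pressure hook:

#include <cstddef>
#include <cstdlib>

// Hypothetical stand-ins for the V8 internals; malloc and a "nothing to
// free" answer keep the sketch runnable on its own.
static void* TryAllocate(size_t size) { return std::malloc(size); }
static bool NotifyPressure(size_t /*needed*/) { return false; }

static void* AllocateWithRetry(size_t size, int tries) {
  void* result = nullptr;
  for (int i = 0; i < tries; ++i) {
    result = TryAllocate(size);
    if (result != nullptr) break;      // Success: stop retrying.
    if (!NotifyPressure(size)) break;  // Embedder could not release memory.
  }
  return result;  // May still be nullptr; the caller decides if that is fatal.
}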
67
deps/v8/src/allocation.h
vendored
67
deps/v8/src/allocation.h
vendored
@ -72,14 +72,68 @@ class FreeStoreAllocationPolicy {
INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
};

// Performs a malloc, with retry logic on failure. Returns nullptr on failure.
// Call free to release memory allocated with this function.
void* AllocWithRetry(size_t size);

void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);

// Allocates a single system memory page with read/write permissions. The
// address parameter is a hint. Returns the base address of the memory, or null
// on failure. Permissions can be changed on the base address.
byte* AllocateSystemPage(void* address, size_t* allocated);
// Gets the page granularity for AllocatePages and FreePages. Addresses returned
// by AllocatePages and AllocatePage are aligned to this size.
V8_EXPORT_PRIVATE size_t AllocatePageSize();

// Gets the granularity at which the permissions and release calls can be made.
V8_EXPORT_PRIVATE size_t CommitPageSize();

// Sets the random seed so that GetRandomMmapAddr() will generate repeatable
// sequences of random mmap addresses.
V8_EXPORT_PRIVATE void SetRandomMmapSeed(int64_t seed);

// Generate a random address to be used for hinting allocation calls.
V8_EXPORT_PRIVATE void* GetRandomMmapAddr();

// Allocates memory. Permissions are set according to the access argument.
// |address| is a hint. |size| and |alignment| must be multiples of
// AllocatePageSize(). Returns the address of the allocated memory, with the
// specified size and alignment, or nullptr on failure.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT void* AllocatePages(void* address, size_t size,
size_t alignment,
PageAllocator::Permission access);

// Frees memory allocated by a call to AllocatePages. |address| and |size| must
// be multiples of AllocatePageSize(). Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);

// Releases memory that is no longer needed. The range specified by |address|
// and |size| must be an allocated memory region. |size| and |new_size| must be
// multiples of CommitPageSize(). Memory from |new_size| to |size| is released.
// Released memory is left in an undefined state, so it should not be accessed.
// Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
size_t new_size);

// Sets permissions according to |access|. |address| and |size| must be
// multiples of CommitPageSize(). Setting permission to kNoAccess may
// cause the memory contents to be lost. Returns true on success, otherwise
// false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access);

// Convenience function that allocates a single system page with read and write
// permissions. |address| is a hint. Returns the base address of the memory and
// the page size via |allocated| on success. Returns nullptr on failure.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);

// Function that may release reserved memory regions to allow failed allocations
// to succeed. |length| is the amount of memory needed. Returns |true| if memory
// could be released, false otherwise.
V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);

// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
@ -90,8 +144,7 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// Reserves virtual memory containing an area of the given size that is
// aligned per alignment. This may not be at the position returned by
// address().
VirtualMemory(size_t size, void* hint,
size_t alignment = base::OS::AllocatePageSize());
VirtualMemory(size_t size, void* hint, size_t alignment = AllocatePageSize());

// Construct a virtual memory by assigning it some already mapped address
// and size.
@ -131,7 +184,7 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// Sets permissions according to the access argument. address and size must be
// multiples of CommitPageSize(). Returns true on success, otherwise false.
bool SetPermissions(void* address, size_t size,
base::OS::MemoryPermission access);
PageAllocator::Permission access);

// Releases memory after |free_start|. Returns the number of bytes released.
size_t Release(void* free_start);
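Read together, the new declarations form a small reserve/commit/release API. A hedged usage sketch against the contracts documented above; it assumes the v8::internal environment, so it compiles only inside the V8 tree, and the function names are exactly the ones declared in this header:

// Sketch only: mirrors the documented contracts (sizes are multiples of
// AllocatePageSize(); permission changes happen at CommitPageSize()
// granularity). Not a standalone program.
void PageRoundTrip() {
  size_t page = v8::internal::AllocatePageSize();
  // Reserve one aligned page with no access rights; |address| is a hint.
  void* region = v8::internal::AllocatePages(
      v8::internal::GetRandomMmapAddr(), page, page,
      v8::PageAllocator::kNoAccess);
  if (region == nullptr) return;  // Allocation may fail.
  // Commit it read/write before use, per the SetPermissions contract.
  if (v8::internal::SetPermissions(region, page,
                                   v8::PageAllocator::kReadWrite)) {
    // ... use the memory ...
  }
  // |address| and |size| must match the original allocation.
  CHECK(v8::internal::FreePages(region, page));
}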
288
deps/v8/src/api-arguments-inl.h
vendored
288
deps/v8/src/api-arguments-inl.h
vendored
@ -13,146 +13,248 @@
namespace v8 {
namespace internal {

#define SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_TYPE) \
do { \
if (ISOLATE->needs_side_effect_check() && \
!PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
return Handle<RETURN_TYPE>(); \
} \
} while (false)
#define FOR_EACH_CALLBACK(F) \
F(Query, query, Object, v8::Integer) \
F(Deleter, deleter, Object, v8::Boolean)

#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F) \
F(AccessorNameGetterCallback, "get", v8::Value, Object) \
F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
F(GenericNamedPropertyDeleterCallback, "delete", v8::Boolean, Object)
#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE) \
if (ISOLATE->needs_side_effect_check() && \
!PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
return RETURN_VALUE(); \
} \
VMState<EXTERNAL> state(ISOLATE); \
ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
PropertyCallbackInfo<API_RETURN_TYPE> callback_info(begin());

#define WRITE_CALL_1_NAME(Function, type, ApiReturn, InternalReturn) \
Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
Handle<Name> name) { \
#define CREATE_NAMED_CALLBACK(Function, type, ReturnType, ApiReturnType) \
Handle<ReturnType> PropertyCallbackArguments::CallNamed##Function( \
Handle<InterceptorInfo> interceptor, Handle<Name> name) { \
DCHECK(interceptor->is_named()); \
DCHECK(!name->IsPrivate()); \
DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols()); \
Isolate* isolate = this->isolate(); \
SIDE_EFFECT_CHECK(isolate, f, InternalReturn); \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ApiReturn> info(begin()); \
RuntimeCallTimerScope timer( \
isolate, RuntimeCallCounterId::kNamed##Function##Callback); \
DCHECK(!name->IsPrivate()); \
GenericNamedProperty##Function##Callback f = \
ToCData<GenericNamedProperty##Function##Callback>( \
interceptor->type()); \
PREPARE_CALLBACK_INFO(isolate, f, Handle<ReturnType>, ApiReturnType); \
LOG(isolate, \
ApiNamedPropertyAccess("interceptor-named-" type, holder(), *name)); \
f(v8::Utils::ToLocal(name), info); \
return GetReturnValue<InternalReturn>(isolate); \
ApiNamedPropertyAccess("interceptor-named-" #type, holder(), *name)); \
f(v8::Utils::ToLocal(name), callback_info); \
return GetReturnValue<ReturnType>(isolate); \
}

FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK)
#undef CREATE_NAMED_CALLBACK

#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME
#undef WRITE_CALL_1_NAME

#define FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(F) \
F(IndexedPropertyGetterCallback, "get", v8::Value, Object) \
F(IndexedPropertyQueryCallback, "has", v8::Integer, Object) \
F(IndexedPropertyDeleterCallback, "delete", v8::Boolean, Object)

#define WRITE_CALL_1_INDEX(Function, type, ApiReturn, InternalReturn) \
Handle<InternalReturn> PropertyCallbackArguments::Call(Function f, \
uint32_t index) { \
#define CREATE_INDEXED_CALLBACK(Function, type, ReturnType, ApiReturnType) \
Handle<ReturnType> PropertyCallbackArguments::CallIndexed##Function( \
Handle<InterceptorInfo> interceptor, uint32_t index) { \
DCHECK(!interceptor->is_named()); \
Isolate* isolate = this->isolate(); \
SIDE_EFFECT_CHECK(isolate, f, InternalReturn); \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function); \
VMState<EXTERNAL> state(isolate); \
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f)); \
PropertyCallbackInfo<ApiReturn> info(begin()); \
LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" type, \
RuntimeCallTimerScope timer( \
isolate, RuntimeCallCounterId::kIndexed##Function##Callback); \
IndexedProperty##Function##Callback f = \
ToCData<IndexedProperty##Function##Callback>(interceptor->type()); \
PREPARE_CALLBACK_INFO(isolate, f, Handle<ReturnType>, ApiReturnType); \
LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" #type, \
holder(), index)); \
f(index, info); \
return GetReturnValue<InternalReturn>(isolate); \
f(index, callback_info); \
return GetReturnValue<ReturnType>(isolate); \
}

FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX(WRITE_CALL_1_INDEX)
FOR_EACH_CALLBACK(CREATE_INDEXED_CALLBACK)

#undef FOR_EACH_CALLBACK_TABLE_MAPPING_1_INDEX
#undef WRITE_CALL_1_INDEX
#undef FOR_EACH_CALLBACK
#undef CREATE_INDEXED_CALLBACK

Handle<Object> PropertyCallbackArguments::Call(
Handle<Object> PropertyCallbackArguments::CallNamedGetter(
Handle<InterceptorInfo> interceptor, Handle<Name> name) {
DCHECK(interceptor->is_named());
DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
DCHECK(!name->IsPrivate());
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kNamedGetterCallback);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-getter", holder(), *name));
GenericNamedPropertyGetterCallback f =
ToCData<GenericNamedPropertyGetterCallback>(interceptor->getter());
return BasicCallNamedGetterCallback(f, name);
}

Handle<Object> PropertyCallbackArguments::CallNamedDescriptor(
Handle<InterceptorInfo> interceptor, Handle<Name> name) {
DCHECK(interceptor->is_named());
DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kNamedDescriptorCallback);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-descriptor", holder(), *name));
GenericNamedPropertyDescriptorCallback f =
ToCData<GenericNamedPropertyDescriptorCallback>(
interceptor->descriptor());
return BasicCallNamedGetterCallback(f, name);
}

Handle<Object> PropertyCallbackArguments::BasicCallNamedGetterCallback(
GenericNamedPropertyGetterCallback f, Handle<Name> name) {
DCHECK(!name->IsPrivate());
Isolate* isolate = this->isolate();
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
f(v8::Utils::ToLocal(name), callback_info);
return GetReturnValue<Object>(isolate);
}

Handle<Object> PropertyCallbackArguments::CallNamedSetter(
Handle<InterceptorInfo> interceptor, Handle<Name> name,
Handle<Object> value) {
DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
GenericNamedPropertySetterCallback f =
ToCData<GenericNamedPropertySetterCallback>(interceptor->setter());
return CallNamedSetterCallback(f, name, value);
}

Handle<Object> PropertyCallbackArguments::CallNamedSetterCallback(
GenericNamedPropertySetterCallback f, Handle<Name> name,
Handle<Object> value) {
DCHECK(!name->IsPrivate());
Isolate* isolate = this->isolate();
SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kNamedSetterCallback);
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}

Handle<Object> PropertyCallbackArguments::Call(
GenericNamedPropertyDefinerCallback f, Handle<Name> name,
Handle<Object> PropertyCallbackArguments::CallNamedDefiner(
Handle<InterceptorInfo> interceptor, Handle<Name> name,
const v8::PropertyDescriptor& desc) {
DCHECK(interceptor->is_named());
DCHECK(!name->IsPrivate());
DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
Isolate* isolate = this->isolate();
SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::GenericNamedPropertyDefinerCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kNamedDefinerCallback);
GenericNamedPropertyDefinerCallback f =
ToCData<GenericNamedPropertyDefinerCallback>(interceptor->definer());
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-define", holder(), *name));
f(v8::Utils::ToLocal(name), desc, info);
f(v8::Utils::ToLocal(name), desc, callback_info);
return GetReturnValue<Object>(isolate);
}

Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
uint32_t index,
Handle<Object> value) {
Handle<Object> PropertyCallbackArguments::CallIndexedSetter(
Handle<InterceptorInfo> interceptor, uint32_t index, Handle<Object> value) {
DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::IndexedPropertySetterCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
RuntimeCallCounterId::kIndexedSetterCallback);
IndexedPropertySetterCallback f =
ToCData<IndexedPropertySetterCallback>(interceptor->setter());
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
f(index, v8::Utils::ToLocal(value), info);
f(index, v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}

Handle<Object> PropertyCallbackArguments::Call(
IndexedPropertyDefinerCallback f, uint32_t index,
Handle<Object> PropertyCallbackArguments::CallIndexedDefiner(
Handle<InterceptorInfo> interceptor, uint32_t index,
const v8::PropertyDescriptor& desc) {
DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
SIDE_EFFECT_CHECK(isolate, f, Object);
RuntimeCallTimerScope timer(
isolate, &RuntimeCallStats::IndexedPropertyDefinerCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Value> info(begin());
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kIndexedDefinerCallback);
IndexedPropertyDefinerCallback f =
ToCData<IndexedPropertyDefinerCallback>(interceptor->definer());
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-define", holder(), index));
f(index, desc, info);
f(index, desc, callback_info);
return GetReturnValue<Object>(isolate);
}

void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
Handle<Name> name, Handle<Object> value) {
Handle<Object> PropertyCallbackArguments::CallIndexedGetter(
Handle<InterceptorInfo> interceptor, uint32_t index) {
DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
if (isolate->needs_side_effect_check() &&
!PerformSideEffectCheck(isolate, FUNCTION_ADDR(f))) {
return;
}
RuntimeCallTimerScope timer(isolate,
&RuntimeCallStats::AccessorNameSetterCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<void> info(begin());
RuntimeCallCounterId::kNamedGetterCallback);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
ApiIndexedPropertyAccess("interceptor-indexed-getter", holder(), index));
IndexedPropertyGetterCallback f =
ToCData<IndexedPropertyGetterCallback>(interceptor->getter());
return BasicCallIndexedGetterCallback(f, index);
}

#undef SIDE_EFFECT_CHECK
Handle<Object> PropertyCallbackArguments::CallIndexedDescriptor(
Handle<InterceptorInfo> interceptor, uint32_t index) {
DCHECK(!interceptor->is_named());
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kIndexedDescriptorCallback);
LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-descriptor",
holder(), index));
IndexedPropertyDescriptorCallback f =
ToCData<IndexedPropertyDescriptorCallback>(interceptor->descriptor());
return BasicCallIndexedGetterCallback(f, index);
}

Handle<Object> PropertyCallbackArguments::BasicCallIndexedGetterCallback(
IndexedPropertyGetterCallback f, uint32_t index) {
Isolate* isolate = this->isolate();
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value);
f(index, callback_info);
return GetReturnValue<Object>(isolate);
}

Handle<JSObject> PropertyCallbackArguments::CallPropertyEnumerator(
Handle<InterceptorInfo> interceptor) {
// For now there is a single enumerator for indexed and named properties.
IndexedPropertyEnumeratorCallback f =
v8::ToCData<IndexedPropertyEnumeratorCallback>(interceptor->enumerator());
// TODO(cbruni): assert same type for indexed and named callback.
Isolate* isolate = this->isolate();
PREPARE_CALLBACK_INFO(isolate, f, Handle<JSObject>, v8::Array);
f(callback_info);
return GetReturnValue<JSObject>(isolate);
}

// -------------------------------------------------------------------------
// Accessors

Handle<Object> PropertyCallbackArguments::CallAccessorGetter(
Handle<AccessorInfo> info, Handle<Name> name) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kAccessorGetterCallback);
LOG(isolate, ApiNamedPropertyAccess("accessor-getter", holder(), *name));
AccessorNameGetterCallback f =
ToCData<AccessorNameGetterCallback>(info->getter());
return BasicCallNamedGetterCallback(f, name);
}

void PropertyCallbackArguments::CallAccessorSetter(
Handle<AccessorInfo> accessor_info, Handle<Name> name,
Handle<Object> value) {
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kAccessorSetterCallback);
AccessorNameSetterCallback f =
ToCData<AccessorNameSetterCallback>(accessor_info->setter());
PREPARE_CALLBACK_INFO(isolate, f, void, void);
LOG(isolate, ApiNamedPropertyAccess("accessor-setter", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
}

#undef PREPARE_CALLBACK_INFO

} // namespace internal
} // namespace v8
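These CallNamed*/CallIndexed* helpers are the internal dispatch for interceptors that embedders install through the public API. For orientation, a minimal embedder-side interceptor of the kind this file ends up invoking, written against the public include/v8.h API of this era (context setup and error handling omitted):

#include "include/v8.h"

// A named-property getter interceptor. PropertyCallbackArguments above is
// the internal machinery that eventually invokes callbacks like this one.
void NamedGetter(v8::Local<v8::Name> name,
                 const v8::PropertyCallbackInfo<v8::Value>& info) {
  // Intercept every named access and answer with a fixed value.
  info.GetReturnValue().Set(42);
}

v8::Local<v8::ObjectTemplate> MakeTemplate(v8::Isolate* isolate) {
  v8::Local<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
  tmpl->SetHandler(v8::NamedPropertyHandlerConfiguration(NamedGetter));
  return tmpl;
}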
32
deps/v8/src/api-arguments.cc
vendored
32
deps/v8/src/api-arguments.cc
vendored
@ -3,6 +3,7 @@
// found in the LICENSE file.

#include "src/api-arguments.h"
#include "src/api-arguments-inl.h"

#include "src/debug/debug.h"
#include "src/objects-inl.h"
@ -18,7 +19,7 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
!isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
return Handle<Object>();
}
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
FunctionCallbackInfo<v8::Value> info(begin(), argv_, argc_);
@ -26,19 +27,22 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
return GetReturnValue<Object>(isolate);
}

Handle<JSObject> PropertyCallbackArguments::Call(
IndexedPropertyEnumeratorCallback f) {
Isolate* isolate = this->isolate();
if (isolate->needs_side_effect_check() &&
!isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
return Handle<JSObject>();
}
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
VMState<EXTERNAL> state(isolate);
ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
PropertyCallbackInfo<v8::Array> info(begin());
f(info);
return GetReturnValue<JSObject>(isolate);
Handle<JSObject> PropertyCallbackArguments::CallNamedEnumerator(
Handle<InterceptorInfo> interceptor) {
DCHECK(interceptor->is_named());
LOG(isolate(), ApiObjectAccess("interceptor-named-enumerator", holder()));
RuntimeCallTimerScope timer(isolate(),
RuntimeCallCounterId::kNamedEnumeratorCallback);
return CallPropertyEnumerator(interceptor);
}

Handle<JSObject> PropertyCallbackArguments::CallIndexedEnumerator(
Handle<InterceptorInfo> interceptor) {
DCHECK(!interceptor->is_named());
LOG(isolate(), ApiObjectAccess("interceptor-indexed-enumerator", holder()));
RuntimeCallTimerScope timer(isolate(),
RuntimeCallCounterId::kIndexedEnumeratorCallback);
return CallPropertyEnumerator(interceptor);
}

bool PropertyCallbackArguments::PerformSideEffectCheck(Isolate* isolate,
81
deps/v8/src/api-arguments.h
vendored
81
deps/v8/src/api-arguments.h
vendored
@ -99,6 +99,54 @@ class PropertyCallbackArguments
DCHECK(values[T::kIsolateIndex]->IsSmi());
}

// -------------------------------------------------------------------------
// Accessor Callbacks
// Also used for AccessorSetterCallback.
inline void CallAccessorSetter(Handle<AccessorInfo> info, Handle<Name> name,
Handle<Object> value);
// Also used for AccessorGetterCallback, AccessorNameGetterCallback.
inline Handle<Object> CallAccessorGetter(Handle<AccessorInfo> info,
Handle<Name> name);

// -------------------------------------------------------------------------
// Named Interceptor Callbacks
inline Handle<Object> CallNamedQuery(Handle<InterceptorInfo> interceptor,
Handle<Name> name);
inline Handle<Object> CallNamedGetter(Handle<InterceptorInfo> interceptor,
Handle<Name> name);
inline Handle<Object> CallNamedSetter(Handle<InterceptorInfo> interceptor,
Handle<Name> name,
Handle<Object> value);
inline Handle<Object> CallNamedSetterCallback(
GenericNamedPropertySetterCallback callback, Handle<Name> name,
Handle<Object> value);
inline Handle<Object> CallNamedDefiner(Handle<InterceptorInfo> interceptor,
Handle<Name> name,
const v8::PropertyDescriptor& desc);
inline Handle<Object> CallNamedDeleter(Handle<InterceptorInfo> interceptor,
Handle<Name> name);
inline Handle<Object> CallNamedDescriptor(Handle<InterceptorInfo> interceptor,
Handle<Name> name);
Handle<JSObject> CallNamedEnumerator(Handle<InterceptorInfo> interceptor);

// -------------------------------------------------------------------------
// Indexed Interceptor Callbacks
inline Handle<Object> CallIndexedQuery(Handle<InterceptorInfo> interceptor,
uint32_t index);
inline Handle<Object> CallIndexedGetter(Handle<InterceptorInfo> interceptor,
uint32_t index);
inline Handle<Object> CallIndexedSetter(Handle<InterceptorInfo> interceptor,
uint32_t index, Handle<Object> value);
inline Handle<Object> CallIndexedDefiner(Handle<InterceptorInfo> interceptor,
uint32_t index,
const v8::PropertyDescriptor& desc);
inline Handle<Object> CallIndexedDeleter(Handle<InterceptorInfo> interceptor,
uint32_t index);
inline Handle<Object> CallIndexedDescriptor(
Handle<InterceptorInfo> interceptor, uint32_t index);
Handle<JSObject> CallIndexedEnumerator(Handle<InterceptorInfo> interceptor);

private:
/*
* The following Call functions wrap the calling of all callbacks to handle
* calling either the old or the new style callbacks depending on which one
@ -107,35 +155,14 @@ class PropertyCallbackArguments
* and used if it's been set to anything inside the callback.
* New style callbacks always use the return value.
*/
Handle<JSObject> Call(IndexedPropertyEnumeratorCallback f);
inline Handle<JSObject> CallPropertyEnumerator(
Handle<InterceptorInfo> interceptor);

inline Handle<Object> Call(AccessorNameGetterCallback f, Handle<Name> name);
inline Handle<Object> Call(GenericNamedPropertyQueryCallback f,
Handle<Name> name);
inline Handle<Object> Call(GenericNamedPropertyDeleterCallback f,
Handle<Name> name);
inline Handle<Object> BasicCallIndexedGetterCallback(
IndexedPropertyGetterCallback f, uint32_t index);
inline Handle<Object> BasicCallNamedGetterCallback(
GenericNamedPropertyGetterCallback f, Handle<Name> name);

inline Handle<Object> Call(IndexedPropertyGetterCallback f, uint32_t index);
inline Handle<Object> Call(IndexedPropertyQueryCallback f, uint32_t index);
inline Handle<Object> Call(IndexedPropertyDeleterCallback f, uint32_t index);

inline Handle<Object> Call(GenericNamedPropertySetterCallback f,
Handle<Name> name, Handle<Object> value);

inline Handle<Object> Call(GenericNamedPropertyDefinerCallback f,
Handle<Name> name,
const v8::PropertyDescriptor& desc);

inline Handle<Object> Call(IndexedPropertySetterCallback f, uint32_t index,
Handle<Object> value);

inline Handle<Object> Call(IndexedPropertyDefinerCallback f, uint32_t index,
const v8::PropertyDescriptor& desc);

inline void Call(AccessorNameSetterCallback f, Handle<Name> name,
Handle<Object> value);

private:
inline JSObject* holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
10
deps/v8/src/api-natives.cc
vendored
10
deps/v8/src/api-natives.cc
vendored
@ -705,7 +705,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
// that is undetectable but not callable, we need to update the types.h
// to allow encoding this.
CHECK(!obj->instance_call_handler()->IsUndefined(isolate));
map->set_is_undetectable();
map->set_is_undetectable(true);
}

// Mark as needs_access_check if needed.
@ -716,20 +716,20 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(

// Set interceptor information in the map.
if (!obj->named_property_handler()->IsUndefined(isolate)) {
map->set_has_named_interceptor();
map->set_has_named_interceptor(true);
map->set_may_have_interesting_symbols(true);
}
if (!obj->indexed_property_handler()->IsUndefined(isolate)) {
map->set_has_indexed_interceptor();
map->set_has_indexed_interceptor(true);
}

// Mark instance as callable in the map.
if (!obj->instance_call_handler()->IsUndefined(isolate)) {
map->set_is_callable();
map->set_is_callable(true);
map->set_is_constructor(true);
}

if (immutable_proto) map->set_immutable_proto(true);
if (immutable_proto) map->set_is_immutable_proto(true);

return result;
}
979
deps/v8/src/api.cc
vendored
979
deps/v8/src/api.cc
vendored
File diff suppressed because it is too large
24
deps/v8/src/api.h
vendored
24
deps/v8/src/api.h
vendored
@ -11,6 +11,7 @@
#include "src/detachable-vector.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/objects/js-collection.h"

namespace v8 {

@ -404,6 +405,7 @@ class HandleScopeImplementer {
call_depth_(0),
microtasks_depth_(0),
microtasks_suppressions_(0),
entered_contexts_count_(0),
entered_context_count_during_microtasks_(0),
#ifdef DEBUG
debug_microtasks_depth_(0),
@ -530,6 +532,7 @@ class HandleScopeImplementer {
int call_depth_;
int microtasks_depth_;
int microtasks_suppressions_;
size_t entered_contexts_count_;
size_t entered_context_count_during_microtasks_;
#ifdef DEBUG
int debug_microtasks_depth_;
@ -545,10 +548,25 @@ class HandleScopeImplementer {

friend class DeferredHandles;
friend class DeferredHandleScope;
friend class HandleScopeImplementerOffsets;

DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer);
};

class HandleScopeImplementerOffsets {
public:
enum Offsets {
kMicrotaskContext = offsetof(HandleScopeImplementer, microtask_context_),
kEnteredContexts = offsetof(HandleScopeImplementer, entered_contexts_),
kEnteredContextsCount =
offsetof(HandleScopeImplementer, entered_contexts_count_),
kEnteredContextCountDuringMicrotasks = offsetof(
HandleScopeImplementer, entered_context_count_during_microtasks_)
};

private:
DISALLOW_IMPLICIT_CONSTRUCTORS(HandleScopeImplementerOffsets);
};

const int kHandleBlockSize = v8::internal::KB - 2; // fit in one page

@ -583,9 +601,13 @@ bool HandleScopeImplementer::HasSavedContexts() {

void HandleScopeImplementer::EnterContext(Handle<Context> context) {
entered_contexts_.push_back(*context);
entered_contexts_count_ = entered_contexts_.size();
}

void HandleScopeImplementer::LeaveContext() { entered_contexts_.pop_back(); }
void HandleScopeImplementer::LeaveContext() {
entered_contexts_.pop_back();
entered_contexts_count_ = entered_contexts_.size();
}

bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
return !entered_contexts_.empty() && entered_contexts_.back() == *context;
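The new HandleScopeImplementerOffsets class publishes field offsets so code that cannot include the full class definition (generated code, for example) can read those fields at a raw byte offset. A self-contained sketch of that offsetof pattern, using a hypothetical struct rather than the real class:

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct Impl {
  int depth;
  size_t entered_count;  // The field we want to reach without member access.
};

// Published offset, in the spirit of HandleScopeImplementerOffsets.
constexpr size_t kEnteredCountOffset = offsetof(Impl, entered_count);

int main() {
  Impl impl{0, 7};
  // Load the field through a raw byte offset, as external code would.
  auto* p = reinterpret_cast<uint8_t*>(&impl) + kEnteredCountOffset;
  std::printf("%zu\n", *reinterpret_cast<size_t*>(p));  // Prints 7.
}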
2
deps/v8/src/arguments.h
vendored
2
deps/v8/src/arguments.h
vendored
@ -85,7 +85,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
\
V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
Isolate* isolate) { \
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Name); \
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::k##Name); \
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \
"V8.Runtime_" #Name); \
Arguments args(args_length, args_object); \
30
deps/v8/src/arm/assembler-arm-inl.h
vendored
30
deps/v8/src/arm/assembler-arm-inl.h
vendored
@ -68,7 +68,7 @@ void RelocInfo::apply(intptr_t delta) {

Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, host_);
return Assembler::target_address_at(pc_, constant_pool_);
}

Address RelocInfo::target_address_address() {
@ -85,7 +85,7 @@ Address RelocInfo::target_address_address() {

Address RelocInfo::constant_pool_entry_address() {
DCHECK(IsInConstantPool());
return Assembler::constant_pool_entry_address(pc_, host_->constant_pool());
return Assembler::constant_pool_entry_address(pc_, constant_pool_);
}

@ -95,21 +95,21 @@ int RelocInfo::target_address_size() {

HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return HeapObject::cast(
reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
return HeapObject::cast(reinterpret_cast<Object*>(
Assembler::target_address_at(pc_, constant_pool_)));
}

Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<HeapObject>(
reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
Assembler::target_address_at(pc_, constant_pool_)));
}

void RelocInfo::set_target_object(HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@ -122,7 +122,7 @@ void RelocInfo::set_target_object(HeapObject* target,

Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, host_);
return Assembler::target_address_at(pc_, constant_pool_);
}

@ -158,7 +158,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = nullptr;
} else {
Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
}
}

@ -382,18 +382,6 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}

Address Assembler::target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}

void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}

EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }

} // namespace internal
130
deps/v8/src/arm/assembler-arm.cc
vendored
130
deps/v8/src/arm/assembler-arm.cc
vendored
@ -72,7 +72,7 @@ static unsigned CpuFeaturesFromCommandLine() {
" armv7+sudiv\n"
" armv7\n"
" armv6\n");
CHECK(false);
FATAL("arm-arch");
}

// If any of the old (deprecated) flags are specified, print a warning, but
@ -339,21 +339,23 @@ bool RelocInfo::IsInConstantPool() {
}

Address RelocInfo::embedded_address() const {
return Assembler::target_address_at(pc_, host_);
return Assembler::target_address_at(pc_, constant_pool_);
}

uint32_t RelocInfo::embedded_size() const {
return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
return reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_, constant_pool_));
}

void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
flush_mode);
}

void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate, pc_, host_,
Assembler::set_target_address_at(isolate, pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
}

@ -474,7 +476,6 @@ void NeonMemOperand::SetAlignment(int align) {
break;
default:
UNREACHABLE();
align_ = 0;
break;
}
}
@ -519,23 +520,23 @@ const Instr kBlxRegMask =
const Instr kBlxRegPattern =
B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnMask = 0x6D * B21 | 0xF * B16;
const Instr kMovMvnPattern = 0xD * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovLeaveCCMask = 0xDFF * B16;
const Instr kMovLeaveCCPattern = 0x1A0 * B16;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kMovImmedMask = 0x7f * B21;
const Instr kMovImmedPattern = 0x1d * B21;
const Instr kOrrImmedMask = 0x7f * B21;
const Instr kOrrImmedPattern = 0x1c * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kMovImmedMask = 0x7F * B21;
const Instr kMovImmedPattern = 0x1D * B21;
const Instr kOrrImmedMask = 0x7F * B21;
const Instr kOrrImmedPattern = 0x1C * B21;
const Instr kCmpCmnMask = 0xDD * B20 | 0xF * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;
const Instr kAndBicFlip = 0xE * B21;

// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kLdrRegFpOffsetPattern = al | B26 | L | Offset | fp.code() * B16;
@ -543,7 +544,7 @@ const Instr kStrRegFpOffsetPattern = al | B26 | Offset | fp.code() * B16;
const Instr kLdrRegFpNegOffsetPattern =
al | B26 | L | NegOffset | fp.code() * B16;
const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | fp.code() * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
const Instr kLdrStrInstrTypeMask = 0xFFFF0000;

Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
@ -1046,7 +1047,7 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
// imm32 must be unsigned.
for (int rot = 0; rot < 16; rot++) {
uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
if ((imm8 <= 0xff)) {
if ((imm8 <= 0xFF)) {
*rotate_imm = rot;
*immed_8 = imm8;
return true;
@ -1172,7 +1173,7 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
if (CpuFeatures::IsSupported(ARMv7)) {
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
CpuFeatureScope scope(this, ARMv7);
movw(target, imm32 & 0xffff, cond);
movw(target, imm32 & 0xFFFF, cond);
movt(target, imm32 >> 16, cond);
}
if (target.code() != rd.code()) {
@ -1187,7 +1188,7 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
immediate = x.immediate();
}
ConstantPoolAddEntry(pc_offset(), x.rmode_, immediate);
ldr(rd, MemOperand(pc, 0), cond);
ldr_pcrel(rd, 0, cond);
}
}

@ -1234,7 +1235,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// This means that finding the even number of trailing zeroes of the
// immediate allows us to more efficiently split it:
int trailing_zeroes = base::bits::CountTrailingZeros(imm) & ~1u;
uint32_t mask = (0xff << trailing_zeroes);
uint32_t mask = (0xFF << trailing_zeroes);
add(rd, rd, Operand(imm & mask), LeaveCC, cond);
imm = imm & ~mask;
} while (!ImmediateFitsAddrMode1Instruction(imm));
@ -1294,6 +1295,9 @@ bool Assembler::AddrMode1TryEncodeOperand(Instr* instr, const Operand& x) {

void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | B | L)) == B26);
// This method does not handle pc-relative addresses. ldr_pcrel() should be
// used instead.
DCHECK(x.rn_ != pc);
int am = x.am_;
if (!x.rm_.is_valid()) {
// Immediate offset.
@ -1331,6 +1335,9 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
DCHECK(x.rn_.is_valid());
// This method does not handle pc-relative addresses. ldr_pcrel() should be
// used instead.
DCHECK(x.rn_ != pc);
int am = x.am_;
bool is_load = (instr & L) == L;
if (!x.rm_.is_valid()) {
@ -1353,7 +1360,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
return;
}
DCHECK_GE(offset_8, 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
instr |= B | (offset_8 >> 4) * B8 | (offset_8 & 0xF);
} else if (x.shift_imm_ != 0) {
// Scaled register offsets are not supported, compute the offset separately
// to a scratch register.
@ -1709,8 +1716,8 @@ void Assembler::sdiv(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
DCHECK(IsEnabled(SUDIV));
emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
src2.code()*B8 | B4 | src1.code());
emit(cond | B26 | B25 | B24 | B20 | dst.code() * B16 | 0xF * B12 |
src2.code() * B8 | B4 | src1.code());
}

@ -1718,7 +1725,7 @@ void Assembler::udiv(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
DCHECK(IsEnabled(SUDIV));
emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xF * B12 |
src2.code() * B8 | B4 | src1.code());
}

@ -1742,7 +1749,7 @@ void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
void Assembler::smmul(Register dst, Register src1, Register src2,
Condition cond) {
DCHECK(dst != pc && src1 != pc && src2 != pc);
emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xF * B12 |
src2.code() * B8 | B4 | src1.code());
}

@ -1824,8 +1831,8 @@ void Assembler::usat(Register dst,
sh = 1;
}

emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
emit(cond | 0x6 * B24 | 0xE * B20 | satpos * B16 | dst.code() * B12 |
src.shift_imm_ * B7 | sh * B6 | 0x1 * B4 | src.rm_.code());
}

@ -1844,8 +1851,8 @@ void Assembler::ubfx(Register dst,
DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
lsb*B7 | B6 | B4 | src.code());
emit(cond | 0xF * B23 | B22 | B21 | (width - 1) * B16 | dst.code() * B12 |
lsb * B7 | B6 | B4 | src.code());
}

@ -1863,8 +1870,8 @@ void Assembler::sbfx(Register dst,
DCHECK(dst != pc && src != pc);
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
lsb*B7 | B6 | B4 | src.code());
emit(cond | 0xF * B23 | B21 | (width - 1) * B16 | dst.code() * B12 |
lsb * B7 | B6 | B4 | src.code());
}

@ -1878,7 +1885,7 @@ void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 | 0xF);
}

@ -1896,7 +1903,7 @@ void Assembler::bfi(Register dst,
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width >= 1) && (width <= (32 - lsb)));
int msb = lsb + width - 1;
emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
emit(cond | 0x1F * B22 | msb * B16 | dst.code() * B12 | lsb * B7 | B4 |
src.code());
}

@ -2073,8 +2080,8 @@ void Assembler::mrs(Register dst, SRegister s, Condition cond) {

void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
Condition cond) {
DCHECK_NE(fields & 0x000f0000, 0); // At least one field must be set.
DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
DCHECK_NE(fields & 0x000F0000, 0); // At least one field must be set.
DCHECK(((fields & 0xFFF0FFFF) == CPSR) || ((fields & 0xFFF0FFFF) == SPSR));
Instr instr;
if (src.IsImmediate()) {
// Immediate.
@ -2159,13 +2166,23 @@ void Assembler::strd(Register src1, Register src2,
AddrMode3(cond | B7 | B6 | B5 | B4, src1, dst);
}

void Assembler::ldr_pcrel(Register dst, int imm12, Condition cond) {
AddrMode am = Offset;
if (imm12 < 0) {
imm12 = -imm12;
am = NegOffset;
}
DCHECK(is_uint12(imm12));
emit(cond | B26 | am | L | pc.code() * B16 | dst.code() * B12 | imm12);
}

// Load/Store exclusive instructions.
void Assembler::ldrex(Register dst, Register src, Condition cond) {
// Instruction details available in ARM DDI 0406C.b, A8.8.75.
// cond(31-28) | 00011001(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
DCHECK(dst != pc);
DCHECK(src != pc);
emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xf9f);
emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xF9F);
}

void Assembler::strex(Register src1, Register src2, Register dst,
@ -2178,7 +2195,7 @@ void Assembler::strex(Register src1, Register src2, Register dst,
DCHECK(src2 != pc);
DCHECK(src1 != dst);
DCHECK(src1 != src2);
emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xf9 * B4 |
emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xF9 * B4 |
src2.code());
}

@ -2188,7 +2205,7 @@ void Assembler::ldrexb(Register dst, Register src, Condition cond) {
DCHECK(dst != pc);
DCHECK(src != pc);
emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
0xf9f);
0xF9F);
}

void Assembler::strexb(Register src1, Register src2, Register dst,
@ -2202,7 +2219,7 @@ void Assembler::strexb(Register src1, Register src2, Register dst,
DCHECK(src1 != dst);
DCHECK(src1 != src2);
emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
0xf9 * B4 | src2.code());
0xF9 * B4 | src2.code());
}

void Assembler::ldrexh(Register dst, Register src, Condition cond) {
@ -2211,7 +2228,7 @@ void Assembler::ldrexh(Register dst, Register src, Condition cond) {
DCHECK(dst != pc);
DCHECK(src != pc);
emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
dst.code() * B12 | 0xf9f);
dst.code() * B12 | 0xF9F);
}

void Assembler::strexh(Register src1, Register src2, Register dst,
@ -2225,7 +2242,7 @@ void Assembler::strexh(Register src1, Register src2, Register dst,
DCHECK(src1 != dst);
DCHECK(src1 != src2);
emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
0xf9 * B4 | src2.code());
0xF9 * B4 | src2.code());
}

// Preload instructions.
@ -2242,8 +2259,8 @@ void Assembler::pld(const MemOperand& address) {
U = 0;
}
DCHECK_LT(offset, 4096);
emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
0xf*B12 | offset);
emit(kSpecialCondition | B26 | B24 | U | B22 | B20 |
address.rn().code() * B16 | 0xF * B12 | offset);
}

@ -2305,7 +2322,7 @@ void Assembler::stop(const char* msg, Condition cond, int32_t code) {

void Assembler::bkpt(uint32_t imm16) {
DCHECK(is_uint16(imm16));
emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
emit(al | B24 | B21 | (imm16 >> 4) * B8 | BKPT | (imm16 & 0xF));
}

@ -2318,7 +2335,7 @@ void Assembler::svc(uint32_t imm24, Condition cond) {
void Assembler::dmb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-378.
emit(kSpecialCondition | 0x57ff * B12 | 5 * B4 | option);
emit(kSpecialCondition | 0x57FF * B12 | 5 * B4 | option);
} else {
// Details available in ARM DDI 0406C.b, B3-1750.
// CP15DMB: CRn=c7, opc1=0, CRm=c10, opc2=5, Rt is ignored.
@ -2330,7 +2347,7 @@ void Assembler::dmb(BarrierOption option) {
void Assembler::dsb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-380.
emit(kSpecialCondition | 0x57ff * B12 | 4 * B4 | option);
emit(kSpecialCondition | 0x57FF * B12 | 4 * B4 | option);
} else {
// Details available in ARM DDI 0406C.b, B3-1750.
// CP15DSB: CRn=c7, opc1=0, CRm=c10, opc2=4, Rt is ignored.
@ -2342,7 +2359,7 @@ void Assembler::dsb(BarrierOption option) {
void Assembler::isb(BarrierOption option) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Details available in ARM DDI 0406C.b, A8-389.
emit(kSpecialCondition | 0x57ff * B12 | 6 * B4 | option);
emit(kSpecialCondition | 0x57FF * B12 | 6 * B4 | option);
} else {
// Details available in ARM DDI 0406C.b, B3-1750.
// CP15ISB: CRn=c7, opc1=0, CRm=c5, opc2=4, Rt is ignored.
@ -2728,7 +2745,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) {
uint64_t i = d.AsUint64();

*lo = i & 0xffffffff;
*lo = i & 0xFFFFFFFF;
*hi = i >> 32;
}

@ -2757,12 +2774,12 @@ static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
DoubleAsTwoUInt32(d, &lo, &hi);

// The most obvious constraint is the long block of zeroes.
if ((lo != 0) || ((hi & 0xffff) != 0)) {
if ((lo != 0) || ((hi & 0xFFFF) != 0)) {
return false;
}

// Bits 61:54 must be all clear or all set.
if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
if (((hi & 0x3FC00000) != 0) && ((hi & 0x3FC00000) != 0x3FC00000)) {
return false;
}

@ -2773,7 +2790,7 @@ static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {

// Create the encoded immediate in the form:
// [00000000,0000abcd,00000000,0000efgh]
*encoding = (hi >> 16) & 0xf; // Low nybble.
*encoding = (hi >> 16) & 0xF; // Low nybble.
*encoding |= (hi >> 4) & 0x70000; // Low three bits of the high nybble.
*encoding |= (hi >> 12) & 0x80000; // Top bit of the high nybble.
@ -2852,8 +2869,7 @@ void Assembler::vmov(const DwVfpRegister dst, Double imm,
|
||||
// We only have one spare scratch register.
|
||||
mov(scratch, Operand(lo));
|
||||
vmov(dst, VmovIndexLo, scratch);
|
||||
if (((lo & 0xffff) == (hi & 0xffff)) &&
|
||||
CpuFeatures::IsSupported(ARMv7)) {
|
||||
if (((lo & 0xFFFF) == (hi & 0xFFFF)) && CpuFeatures::IsSupported(ARMv7)) {
|
||||
CpuFeatureScope scope(this, ARMv7);
|
||||
movt(scratch, hi >> 16);
|
||||
} else {
|
||||
@ -3193,7 +3209,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
|
||||
dst.split_code(&vd, &d);
|
||||
int imm5 = 32 - fraction_bits;
|
||||
int i = imm5 & 1;
|
||||
int imm4 = (imm5 >> 1) & 0xf;
|
||||
int imm4 = (imm5 >> 1) & 0xF;
|
||||
emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
|
||||
vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
|
||||
}
|
||||
@ -4973,12 +4989,12 @@ Instr Assembler::GetMovWPattern() { return kMovwPattern; }
|
||||
|
||||
Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
|
||||
DCHECK_LT(immediate, 0x10000);
|
||||
return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
|
||||
return ((immediate & 0xF000) << 4) | (immediate & 0xFFF);
|
||||
}
|
||||
|
||||
|
||||
Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
|
||||
instruction &= ~EncodeMovwImmediate(0xffff);
|
||||
instruction &= ~EncodeMovwImmediate(0xFFFF);
|
||||
return instruction | EncodeMovwImmediate(immediate);
|
||||
}
|
||||
|
||||
|
12
deps/v8/src/arm/assembler-arm.h
vendored
@ -173,6 +173,7 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr Register no_reg = Register::no_reg();

constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = false;
constexpr bool kSimdMaskRegisters = false;

@ -652,10 +653,6 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));

// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@ -907,6 +904,9 @@ class Assembler : public AssemblerBase {
Register src2,
const MemOperand& dst, Condition cond = al);

// Load literal from a pc relative address.
void ldr_pcrel(Register dst, int imm12, Condition cond = al);

// Load/Store exclusive instructions
void ldrex(Register dst, Register src, Condition cond = al);
void strex(Register src1, Register src2, Register dst, Condition cond = al);
@ -1344,6 +1344,10 @@ class Assembler : public AssemblerBase {

void pop();

void vpush(QwNeonRegister src, Condition cond = al) {
vstm(db_w, sp, src.low(), src.high(), cond);
}

void vpush(DwVfpRegister src, Condition cond = al) {
vstm(db_w, sp, src, src, cond);
}
36
deps/v8/src/arm/code-stubs-arm.cc
vendored
@ -83,7 +83,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
if (masm->emit_debug_code()) {
// Scratch is exponent - 1.
__ cmp(scratch, Operand(30 - 1));
__ Check(ge, kUnexpectedValue);
__ Check(ge, AbortReason::kUnexpectedValue);
}

// We don't have to handle cases where 0 <= exponent <= 20 for which we would
@ -116,8 +116,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// double_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
// Input_high ASR 31 equals 0xffffffff and double_high LSR 31 equals 1.
// New result = (result eor 0xffffffff) + 1 = 0 - result.
// Input_high ASR 31 equals 0xFFFFFFFF and double_high LSR 31 equals 1.
// New result = (result eor 0xFFFFFFFF) + 1 = 0 - result.
__ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
__ add(result_reg, result_reg, Operand(double_high, LSR, 31));

@ -414,6 +414,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Set up the reserved register for 0.0.
__ vmov(kDoubleRegZero, Double(0.0));

__ InitializeRootRegister();

// Get address of argv, see stm above.
// r0: code entry
// r1: function
@ -509,12 +511,7 @@
// r2: receiver
// r3: argc
// r4: argv
if (type() == StackFrame::CONSTRUCT_ENTRY) {
__ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
RelocInfo::CODE_TARGET);
} else {
__ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
}
__ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);

// Unlink this frame from the handler chain.
__ PopStackHandler();
@ -681,7 +678,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
}

// If we reached this point there is a problem.
__ Abort(kUnexpectedElementsKindInArrayConstructor);
__ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@ -723,7 +720,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
if (FLAG_debug_code) {
__ ldr(r5, FieldMemOperand(r2, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, kExpectedAllocationSite);
__ Assert(eq, AbortReason::kExpectedAllocationSite);
}

// Save the resulting elements kind in type info. We can't just store r3
@ -747,7 +744,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
}

// If we reached this point there is a problem.
__ Abort(kUnexpectedElementsKindInArrayConstructor);
__ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
@ -824,9 +821,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ tst(r4, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r4, r4, r5, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);

// We should either have undefined in r2 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(r2, r4);
@ -904,9 +901,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, kUnexpectedInitialMapForArrayFunction);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}

// Figure out the right elements kind
@ -922,8 +919,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
__ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &done);
__ cmp(r3, Operand(HOLEY_ELEMENTS));
__ Assert(eq,
kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ Assert(
eq,
AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}

@ -1025,7 +1023,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (__ emit_debug_code()) {
__ ldr(r1, MemOperand(r9, kLevelOffset));
__ cmp(r1, r6);
__ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
__ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
}
__ sub(r6, r6, Operand(1));
__ str(r6, MemOperand(r9, kLevelOffset));
18
deps/v8/src/arm/codegen-arm.cc
vendored
@ -24,8 +24,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
return stub;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;

MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -170,8 +169,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));

Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -184,8 +182,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
return stub;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;

MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -261,8 +258,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
masm.GetCode(isolate, &desc);

Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
@ -273,8 +269,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
#else
size_t allocated = 0;
byte* buffer =
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;

MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
@ -290,8 +285,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));

Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
2
deps/v8/src/arm/constants-arm.cc
vendored
@ -20,7 +20,7 @@ Float64 Instruction::DoubleImmedVmov() const {
// where B = ~b. Only the high 16 bits are affected.
uint64_t high16;
high16 = (Bits(17, 16) << 4) | Bits(3, 0); // xxxxxxxx,xxcdefgh.
high16 |= (0xff * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
high16 |= (0xFF * Bit(18)) << 6; // xxbbbbbb,bbxxxxxx.
high16 |= (Bit(18) ^ 1) << 14; // xBxxxxxx,xxxxxxxx.
high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
3
deps/v8/src/arm/constants-arm.h
vendored
@ -34,9 +34,6 @@ inline int DecodeConstantPoolLength(int instr) {
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}

// Used in code age prologue - ldr(pc, MemOperand(pc, -4))
const int kCodeAgeJumpInstruction = 0xe51ff004;

// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
8
deps/v8/src/arm/deoptimizer-arm.cc
vendored
@ -245,9 +245,9 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// Note that registers are still live when jumping to an entry.

// We need to be able to generate immediates up to kMaxNumberOfEntries. On
// ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
// ARMv7, we can use movw (with a maximum immediate of 0xFFFF). On ARMv6, we
// need two instructions.
STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xFFFF);
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
@ -263,7 +263,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
__ bind(&done);
} else {
// We want to keep table_entry_size_ == 8 (since this is the common case),
// but we need two instructions to load most immediates over 0xff. To handle
// but we need two instructions to load most immediates over 0xFF. To handle
// this, we set the low byte in the main table, and then set the high byte
// in a separate table if necessary.
Label high_fixes[256];
@ -272,7 +272,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ mov(scratch, Operand(i & 0xff)); // Set the low byte.
__ mov(scratch, Operand(i & 0xFF)); // Set the low byte.
__ b(&high_fixes[i >> 8]); // Jump to the secondary table.
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
44
deps/v8/src/arm/disasm-arm.cc
vendored
@ -541,7 +541,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
// 'msg: for simulator break instructions
DCHECK(STRING_STARTS_WITH(format, "msg"));
byte* str =
reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
reinterpret_cast<byte*>(instr->InstructionBits() & 0x0FFFFFFF);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s", converter_.NameInCode(str));
return 3;
@ -819,7 +819,7 @@ void Decoder::DecodeType01(Instruction* instr) {
Unknown(instr); // not used by V8
}
}
} else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
} else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xD) == 0xD)) {
// ldrd, strd
switch (instr->PUField()) {
case da_x: {
@ -905,7 +905,7 @@ void Decoder::DecodeType01(Instruction* instr) {
}
} else if ((type == 0) && instr->IsMiscType0()) {
if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
(instr->Bits(15, 4) == 0xf00)) {
(instr->Bits(15, 4) == 0xF00)) {
Format(instr, "msr'cond 'spec_reg'spec_reg_fields, 'rm");
} else if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 0) &&
(instr->Bits(11, 0) == 0)) {
@ -1285,8 +1285,8 @@ void Decoder::DecodeType3(Instruction* instr) {
}
} else {
// PU == 0b01, BW == 0b11, Bits(9, 6) != 0b0001
if ((instr->Bits(20, 16) == 0x1f) &&
(instr->Bits(11, 4) == 0xf3)) {
if ((instr->Bits(20, 16) == 0x1F) &&
(instr->Bits(11, 4) == 0xF3)) {
Format(instr, "rbit'cond 'rd, 'rm");
} else {
UNREACHABLE();
@ -1561,7 +1561,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
const char* rt_name = converter_.NameOfCPURegister(instr->RtValue());
if (instr->Bit(23) == 0) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
if ((opc1_opc2 & 0xb) == 0) {
if ((opc1_opc2 & 0xB) == 0) {
// NeonS32/NeonU32
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'Dd[0], 'rt");
@ -1597,7 +1597,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
} else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
if ((opc1_opc2 & 0xb) == 0) {
if ((opc1_opc2 & 0xB) == 0) {
// NeonS32 / NeonU32
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
@ -1972,7 +1972,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xa: {
case 0xA: {
// vpmin/vpmax.s<size> Dd, Dm, Dn.
const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
out_buffer_pos_ +=
@ -1980,14 +1980,14 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
op, size, Vd, Vn, Vm);
break;
}
case 0xb: {
case 0xB: {
// vpadd.i<size> Dd, Dm, Dn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vpadd.i%d d%d, d%d, d%d",
size, Vd, Vn, Vm);
break;
}
case 0xd: {
case 0xD: {
if (instr->Bit(4) == 0) {
const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
// vadd/vsub.f32 Qd, Qm, Qn.
@ -1998,7 +1998,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xe: {
case 0xE: {
if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
// vceq.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
@ -2008,7 +2008,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xf: {
case 0xF: {
if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
if (instr->Bit(4) == 1) {
// vrecps/vrsqrts.f32 Qd, Qm, Qn.
@ -2158,7 +2158,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xa: {
case 0xA: {
// vpmin/vpmax.u<size> Dd, Dm, Dn.
const char* op = instr->Bit(4) == 1 ? "vpmin" : "vpmax";
out_buffer_pos_ +=
@ -2166,7 +2166,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
op, size, Vd, Vn, Vm);
break;
}
case 0xd: {
case 0xD: {
if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
instr->Bit(4) == 1) {
// vmul.f32 Qd, Qm, Qn
@ -2182,7 +2182,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xe: {
case 0xE: {
if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
// vcge/vcgt.f32 Qd, Qm, Qn.
@ -2332,12 +2332,12 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
instr->Bit(6) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
char type = instr->Bit(10) != 0 ? 'f' : 's';
if (instr->Bits(9, 6) == 0xd) {
if (instr->Bits(9, 6) == 0xD) {
// vabs<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%c%d q%d, q%d",
type, size, Vd, Vm);
} else if (instr->Bits(9, 6) == 0xf) {
} else if (instr->Bits(9, 6) == 0xF) {
// vneg<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%c%d q%d, q%d",
@ -2423,7 +2423,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
break;
case 0xA:
case 0xB:
if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
const char* rn_name = converter_.NameOfCPURegister(instr->Bits(19, 16));
int offset = instr->Bits(11, 0);
if (offset == 0) {
@ -2601,14 +2601,6 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"constant pool begin (length %d)",
DecodeConstantPoolLength(instruction_bits));
return Instruction::kInstrSize;
} else if (instruction_bits == kCodeAgeJumpInstruction) {
// The code age prologue has a constant immediately following the jump
// instruction.
Instruction* target = Instruction::At(instr_ptr + Instruction::kInstrSize);
DecodeType2(instr);
SNPrintF(out_buffer_ + out_buffer_pos_,
" (0x%08x)", target->InstructionBits());
return 2 * Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
case 0:
7
deps/v8/src/arm/interface-descriptors-arm.cc
vendored
@ -45,8 +45,6 @@ const Register LoadDescriptor::SlotRegister() { return r0; }

const Register LoadWithVectorDescriptor::VectorRegister() { return r3; }

const Register LoadICProtoArrayDescriptor::HandlerRegister() { return r4; }

const Register StoreDescriptor::ReceiverRegister() { return r1; }
const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
@ -204,6 +202,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}

void AbortJSDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
96
deps/v8/src/arm/macro-assembler-arm.cc
vendored
@ -224,44 +224,6 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, mode);
}

void MacroAssembler::CallDeoptimizer(Address target) {
BlockConstPoolScope block_const_pool(this);

uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);

// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.

// We use blx, like a call, but it does not return here. The link register is
// used by the deoptimizer to work out what called it.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(this, ARMv7);
movw(ip, target_raw & 0xffff);
movt(ip, (target_raw >> 16) & 0xffff);
blx(ip);
} else {
// We need to load a literal, but we can't use the usual constant pool
// because we call this from a patcher, and cannot afford the guard
// instruction and other administrative overhead.
ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
blx(ip);
dd(target_raw);
}
}

int MacroAssembler::CallDeoptimizerSize() {
// ARMv7+:
// movw ip, ...
// movt ip, ...
// blx ip @ This never returns.
//
// ARMv6:
// ldr ip, =address
// blx ip @ This never returns.
// .word address
return 3 * kInstrSize;
}

void TurboAssembler::Ret(Condition cond) { bx(lr, cond); }

void TurboAssembler::Drop(int count, Condition cond) {
@ -608,7 +570,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
Register scratch = temps.Acquire();
ldr(scratch, MemOperand(address));
cmp(scratch, value);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}

if (remembered_set_action == OMIT_REMEMBERED_SET &&
@ -985,7 +947,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
and_(scratch, shift, Operand(0x1f));
and_(scratch, shift, Operand(0x1F));
lsl(dst_high, src_low, Operand(scratch));
mov(dst_low, Operand(0));
jmp(&done);
@ -1010,7 +972,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high,
Move(dst_high, src_low);
Move(dst_low, Operand(0));
} else if (shift >= 32) {
shift &= 0x1f;
shift &= 0x1F;
lsl(dst_high, src_low, Operand(shift));
mov(dst_low, Operand(0));
} else {
@ -1031,7 +993,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
and_(scratch, shift, Operand(0x1f));
and_(scratch, shift, Operand(0x1F));
lsr(dst_low, src_high, Operand(scratch));
mov(dst_high, Operand(0));
jmp(&done);
@ -1054,7 +1016,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high,
mov(dst_low, src_high);
mov(dst_high, Operand(0));
} else if (shift > 32) {
shift &= 0x1f;
shift &= 0x1F;
lsr(dst_low, src_high, Operand(shift));
mov(dst_high, Operand(0));
} else if (shift == 0) {
@ -1078,7 +1040,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
rsb(scratch, shift, Operand(32), SetCC);
b(gt, &less_than_32);
// If shift >= 32
and_(scratch, shift, Operand(0x1f));
and_(scratch, shift, Operand(0x1F));
asr(dst_low, src_high, Operand(scratch));
asr(dst_high, src_high, Operand(31));
jmp(&done);
@ -1100,7 +1062,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high,
mov(dst_low, src_high);
asr(dst_high, src_high, Operand(31));
} else if (shift > 32) {
shift &= 0x1f;
shift &= 0x1F;
asr(dst_low, src_high, Operand(shift));
asr(dst_high, src_high, Operand(31));
} else if (shift == 0) {
@ -1218,7 +1180,6 @@ int TurboAssembler::ActivationFrameAlignment() {
#endif // V8_HOST_ARCH_ARM
}

void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
@ -1244,6 +1205,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
ldr(cp, MemOperand(scratch));
#ifdef DEBUG
mov(r3, Operand(Context::kInvalidContext));
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
str(r3, MemOperand(scratch));
@ -1307,7 +1269,7 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,

if (FLAG_debug_code) {
cmp(src_reg, dst_reg);
Check(lo, kStackAccessBelowStackPointer);
Check(lo, AbortReason::kStackAccessBelowStackPointer);
}

// Restore caller's frame pointer and return address now as they will be
@ -1539,15 +1501,15 @@ void MacroAssembler::MaybeDropFrames() {

void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);

Push(Smi::kZero); // Padding.
// Link the current handler as the next handler.
mov(r6,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
ldr(r5, MemOperand(r6));
push(r5);

// Set this new handler as the current one.
str(sp, MemOperand(r6));
}
@ -1560,8 +1522,8 @@ void MacroAssembler::PopStackHandler() {
pop(r1);
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kHandlerAddress, isolate())));
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
str(r1, MemOperand(scratch));
add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
}

@ -1660,9 +1622,9 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();

// If result is not saturated (0x7fffffff or 0x80000000), we are done.
// If result is not saturated (0x7FFFFFFF or 0x80000000), we are done.
sub(scratch, result, Operand(1));
cmp(scratch, Operand(0x7ffffffe));
cmp(scratch, Operand(0x7FFFFFFE));
b(lt, done);
}

@ -1765,12 +1727,12 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
}
}

void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
if (emit_debug_code())
Check(cond, reason);
}

void TurboAssembler::Check(Condition cond, BailoutReason reason) {
void TurboAssembler::Check(Condition cond, AbortReason reason) {
Label L;
b(cond, &L);
Abort(reason);
@ -1778,11 +1740,11 @@ void TurboAssembler::Check(Condition cond, BailoutReason reason) {
bind(&L);
}

void TurboAssembler::Abort(BailoutReason reason) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
@ -1873,7 +1835,7 @@ void MacroAssembler::AssertNotSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmi);
Check(ne, AbortReason::kOperandIsASmi);
}
}

@ -1882,7 +1844,7 @@ void MacroAssembler::AssertSmi(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(eq, kOperandIsNotSmi);
Check(eq, AbortReason::kOperandIsNotASmi);
}
}

@ -1890,11 +1852,11 @@ void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotAFixedArray);
Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray);
push(object);
CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
pop(object);
Check(eq, kOperandIsNotAFixedArray);
Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}

@ -1902,11 +1864,11 @@ void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotAFunction);
Check(ne, AbortReason::kOperandIsASmiAndNotAFunction);
push(object);
CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
pop(object);
Check(eq, kOperandIsNotAFunction);
Check(eq, AbortReason::kOperandIsNotAFunction);
}
}

@ -1915,18 +1877,18 @@ void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotABoundFunction);
Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction);
push(object);
CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
pop(object);
Check(eq, kOperandIsNotABoundFunction);
Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
}

void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
tst(object, Operand(kSmiTagMask));
Check(ne, kOperandIsASmiAndNotAGeneratorObject);
Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject);

// Load map
Register map = object;
@ -1945,7 +1907,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
bind(&do_check);
// Restore generator object to register and perform assertion
pop(object);
Check(eq, kOperandIsNotAGeneratorObject);
Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}

void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
@ -1957,7 +1919,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
b(eq, &done_checking);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
Assert(eq, kExpectedUndefinedOrCell);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
}
10
deps/v8/src/arm/macro-assembler-arm.h
vendored
@ -294,13 +294,13 @@ class TurboAssembler : public Assembler {

// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, BailoutReason reason);
void Assert(Condition cond, AbortReason reason);

// Like Assert(), but always enabled.
void Check(Condition cond, BailoutReason reason);
void Check(Condition cond, AbortReason reason);

// Print a message to stdout and abort execution.
void Abort(BailoutReason msg);
void Abort(AbortReason msg);

inline bool AllowThisStubCall(CodeStub* stub);

@ -579,10 +579,6 @@ class MacroAssembler : public TurboAssembler {
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);

// Used for patching in calls to the deoptimizer.
void CallDeoptimizer(Address target);
static int CallDeoptimizerSize();

// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg,
238
deps/v8/src/arm/simulator-arm.cc
vendored
@ -259,11 +259,9 @@ void ArmDebugger::Debug() {
for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
dvalue = GetVFPDoubleRegisterValue(i);
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
VFPRegisters::Name(i, true),
dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xffffffff));
PrintF("%3s: %f 0x%08x %08x\n", VFPRegisters::Name(i, true),
dvalue, static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xFFFFFFFF));
}
} else {
if (GetValue(arg1, &value)) {
@ -273,11 +271,9 @@ void ArmDebugger::Debug() {
PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
} else if (GetVFPDoubleValue(arg1, &dvalue)) {
uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%s: %f 0x%08x %08x\n",
arg1,
dvalue,
PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xffffffff));
static_cast<uint32_t>(as_words & 0xFFFFFFFF));
} else {
PrintF("%s unrecognized\n", arg1);
}
@ -575,6 +571,10 @@ void Simulator::set_last_debugger_input(char* input) {
last_debugger_input_ = input;
}

void Simulator::SetRedirectInstruction(Instruction* instruction) {
instruction->SetInstructionBits(al | (0xF * B24) | kCallRtRedirected);
}

void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
void* start_addr, size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
@ -644,21 +644,12 @@ void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
}

void Simulator::Initialize(Isolate* isolate) {
if (isolate->simulator_initialized()) return;
isolate->set_simulator_initialized(true);
::v8::internal::ExternalReference::set_redirector(isolate,
&RedirectExternalReference);
}

Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize(isolate);
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@ -715,100 +706,6 @@ Simulator::~Simulator() {
free(stack_);
}

// When the generated code calls an external reference we need to catch that in
// the simulator. The external reference will be a function compiled for the
// host architecture. We need to call that function instead of trying to
// execute it with the simulator. We do that by redirecting the external
// reference to a svc (Supervisor Call) instruction that is handled by
// the simulator. We write the original destination of the jump just at a known
// offset from the svc instruction so the simulator knows what to call.
class Redirection {
public:
Redirection(Isolate* isolate, void* external_function,
ExternalReference::Type type)
: external_function_(external_function),
swi_instruction_(al | (0xf * B24) | kCallRtRedirected),
type_(type),
next_(nullptr) {
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
reinterpret_cast<void*>(&swi_instruction_),
Instruction::kInstrSize);
isolate->set_simulator_redirection(this);
}

void* address_of_swi_instruction() {
return reinterpret_cast<void*>(&swi_instruction_);
}

void* external_function() { return external_function_; }
ExternalReference::Type type() { return type_; }

static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Redirection* current = isolate->simulator_redirection();
for (; current != nullptr; current = current->next_) {
if (current->external_function_ == external_function &&
current->type_ == type) {
return current;
}
}
return new Redirection(isolate, external_function, type);
}

static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
char* addr_of_redirection =
addr_of_swi - offsetof(Redirection, swi_instruction_);
return reinterpret_cast<Redirection*>(addr_of_redirection);
}

static void* ReverseRedirection(int32_t reg) {
Redirection* redirection = FromSwiInstruction(
reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
return redirection->external_function();
}

static void DeleteChain(Redirection* redirection) {
while (redirection != nullptr) {
Redirection* next = redirection->next_;
delete redirection;
redirection = next;
}
}

private:
void* external_function_;
uint32_t swi_instruction_;
ExternalReference::Type type_;
Redirection* next_;
};

// static
void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
Redirection* first) {
Redirection::DeleteChain(first);
if (i_cache != nullptr) {
for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
entry = i_cache->Next(entry)) {
delete static_cast<CachePage*>(entry->value);
}
delete i_cache;
}
}

void* Simulator::RedirectExternalReference(Isolate* isolate,
void* external_function,
ExternalReference::Type type) {
base::LockGuard<base::Mutex> lock_guard(
isolate->simulator_redirection_mutex());
Redirection* redirection = Redirection::Get(isolate, external_function, type);
return redirection->address_of_swi_instruction();
}

// Get the active Simulator for the current thread.
Simulator* Simulator::current(Isolate* isolate) {
@ -1035,9 +932,9 @@ void Simulator::SetFpResult(const double& result) {

void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
registers_[2] = 0x50Bad4U;
registers_[3] = 0x50Bad4U;
registers_[12] = 0x50Bad4U;
registers_[2] = 0x50BAD4U;
registers_[3] = 0x50BAD4U;
registers_[12] = 0x50BAD4U;
}

@ -1292,7 +1189,7 @@ void Simulator::SetVFlag(bool val) {
bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
uint32_t urest = 0xffffffffU - uleft;
uint32_t urest = 0xFFFFFFFFU - uleft;

return (uright > urest) ||
(carry && (((uright + 1) > urest) || (uright > (urest - 1))));
@ -1409,7 +1306,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
case ASR: {
if (shift_amount == 0) {
if (result < 0) {
result = 0xffffffff;
result = 0xFFFFFFFF;
*carry_out = true;
} else {
result = 0;
@ -1468,7 +1365,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
} else {
// by register
int rs = instr->RsValue();
shift_amount = get_register(rs) &0xff;
shift_amount = get_register(rs) & 0xFF;
switch (shift) {
case ASR: {
if (shift_amount == 0) {
@ -1481,7 +1378,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
DCHECK_GE(shift_amount, 32);
if (result < 0) {
*carry_out = true;
result = 0xffffffff;
result = 0xFFFFFFFF;
} else {
*carry_out = false;
result = 0;
@ -1739,7 +1636,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
bool stack_aligned =
(get_register(sp)
& (::v8::internal::FLAG_sim_stack_alignment - 1)) == 0;
Redirection* redirection = Redirection::FromSwiInstruction(instr);
Redirection* redirection = Redirection::FromInstruction(instr);
int32_t arg0 = get_register(r0);
int32_t arg1 = get_register(r1);
int32_t arg2 = get_register(r2);
@ -1982,7 +1879,7 @@ Float32 Simulator::canonicalizeNaN(Float32 value) {
double Simulator::canonicalizeNaN(double value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
constexpr uint64_t kDefaultNaN = V8_UINT64_C(0x7FF8000000000000);
constexpr uint64_t kDefaultNaN = uint64_t{0x7FF8000000000000};
if (FPSCR_default_NaN_mode_ && std::isnan(value)) {
value = bit_cast<double>(kDefaultNaN);
}
@ -1993,7 +1890,7 @@ Float64 Simulator::canonicalizeNaN(Float64 value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
constexpr Float64 kDefaultNaN =
Float64::FromBits(V8_UINT64_C(0x7FF8000000000000));
Float64::FromBits(uint64_t{0x7FF8000000000000});
return FPSCR_default_NaN_mode_ && value.is_nan() ? kDefaultNaN : value;
}

@ -2036,7 +1933,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
PrintF("Stop counter for code %i has overflowed.\n"
"Enabling this code and reseting the counter to 0.\n", code);
watched_stops_[code].count = 0;
@ -2137,14 +2034,14 @@ void Simulator::DecodeType01(Instruction* instr) {
int64_t right_op = static_cast<int32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
lo_res = static_cast<int32_t>(result & 0xffffffff);
lo_res = static_cast<int32_t>(result & 0xFFFFFFFF);
} else {
// unsigned multiply
uint64_t left_op = static_cast<uint32_t>(rm_val);
uint64_t right_op = static_cast<uint32_t>(rs_val);
uint64_t result = left_op * right_op;
hi_res = static_cast<int32_t>(result >> 32);
lo_res = static_cast<int32_t>(result & 0xffffffff);
lo_res = static_cast<int32_t>(result & 0xFFFFFFFF);
}
set_register(rd_lo, lo_res);
set_register(rd_hi, hi_res);
@ -2316,7 +2213,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
}
}
if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
if (((instr->Bits(7, 4) & 0xD) == 0xD) && (instr->Bit(20) == 0)) {
DCHECK_EQ(rd % 2, 0);
if (instr->HasH()) {
// The strd instruction.
@ -2357,7 +2254,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
} else if ((type == 0) && instr->IsMiscType0()) {
if ((instr->Bits(27, 23) == 2) && (instr->Bits(21, 20) == 2) &&
(instr->Bits(15, 4) == 0xf00)) {
(instr->Bits(15, 4) == 0xF00)) {
// MSR
int rm = instr->RmValue();
DCHECK_NE(pc, rm); // UNPREDICTABLE
@ -2569,8 +2466,8 @@ void Simulator::DecodeType01(Instruction* instr) {
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, false));
} else {
// Format(instr, "movt'cond 'rd, 'imm").
alu_out = (get_register(rd) & 0xffff) |
(instr->ImmedMovwMovtValue() << 16);
alu_out =
(get_register(rd) & 0xFFFF) | (instr->ImmedMovwMovtValue() << 16);
set_register(rd, alu_out);
}
break;
@ -2987,8 +2884,8 @@ void Simulator::DecodeType3(Instruction* instr) {
}
} else {
// PU == 0b01, BW == 0b11, Bits(9, 6) != 0b0001
if ((instr->Bits(20, 16) == 0x1f) &&
(instr->Bits(11, 4) == 0xf3)) {
if ((instr->Bits(20, 16) == 0x1F) &&
(instr->Bits(11, 4) == 0xF3)) {
// Rbit.
uint32_t rm_val = get_register(instr->RmValue());
set_register(rd, base::bits::ReverseBits(rm_val));
@ -3084,7 +2981,7 @@ void Simulator::DecodeType3(Instruction* instr) {
uint32_t rd_val =
static_cast<uint32_t>(get_register(instr->RdValue()));
uint32_t bitcount = msbit - lsbit + 1;
uint32_t mask = 0xffffffffu >> (32 - bitcount);
uint32_t mask = 0xFFFFFFFFu >> (32 - bitcount);
rd_val &= ~(mask << lsbit);
if (instr->RmValue() != 15) {
// bfi - bitfield insert.
@ -3422,7 +3319,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int vd = instr->VFPNRegValue(kDoublePrecision);
int rt = instr->RtValue();
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
if ((opc1_opc2 & 0xb) == 0) {
if ((opc1_opc2 & 0xB) == 0) {
// NeonS32/NeonU32
uint32_t data[2];
get_d_register(vd, data);
@ -3500,7 +3397,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
uint64_t data;
get_d_register(vn, &data);
if ((opc1_opc2 & 0xb) == 0) {
if ((opc1_opc2 & 0xB) == 0) {
// NeonS32 / NeonU32
int32_t int_data[2];
memcpy(int_data, &data, sizeof(int_data));
@ -3514,14 +3411,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int i = opc1_opc2 & 0x7;
int shift = i * kBitsPerByte;
uint32_t scalar = (data >> shift) & 0xFFu;
if (!u && (scalar & 0x80) != 0) scalar |= 0xffffff00;
if (!u && (scalar & 0x80) != 0) scalar |= 0xFFFFFF00;
set_register(rt, scalar);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
int shift = i * kBitsPerByte * kShortSize;
uint32_t scalar = (data >> shift) & 0xFFFFu;
if (!u && (scalar & 0x8000) != 0) scalar |= 0xffff0000;
if (!u && (scalar & 0x8000) != 0) scalar |= 0xFFFF0000;
set_register(rt, scalar);
} else {
UNREACHABLE(); // Not used by V8.
@ -3702,7 +3599,7 @@ bool get_inv_op_vfp_flag(VFPRoundingMode mode,
double val,
bool unsigned_) {
DCHECK((mode == RN) || (mode == RM) || (mode == RZ));
double max_uint = static_cast<double>(0xffffffffu);
double max_uint = static_cast<double>(0xFFFFFFFFu);
double max_int = static_cast<double>(kMaxInt);
double min_int = static_cast<double>(kMinInt);

@ -3744,7 +3641,7 @@ int VFPConversionSaturate(double val, bool unsigned_res) {
return 0;
} else {
if (unsigned_res) {
return (val < 0) ? 0 : 0xffffffffu;
return (val < 0) ? 0 : 0xFFFFFFFFu;
} else {
return (val < 0) ? kMinInt : kMaxInt;
}
@ -4496,7 +4393,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xa: {
case 0xA: {
// vpmin/vpmax.s<size> Dd, Dm, Dn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
bool min = instr->Bit(4) != 0;
@ -4516,7 +4413,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xb: {
case 0xB: {
// vpadd.i<size> Dd, Dm, Dn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
switch (size) {
@ -4535,7 +4432,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xd: {
case 0xD: {
if (instr->Bit(4) == 0) {
float src1[4], src2[4];
get_neon_register(Vn, src1);
@ -4555,7 +4452,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xe: {
case 0xE: {
if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
// vceq.f32.
float src1[4], src2[4];
@ -4571,7 +4468,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xf: {
case 0xF: {
if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
float src1[4], src2[4];
get_neon_register(Vn, src1);
@ -4862,7 +4759,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xa: {
case 0xA: {
// vpmin/vpmax.u<size> Dd, Dm, Dn.
NeonSize size = static_cast<NeonSize>(instr->Bits(21, 20));
bool min = instr->Bit(4) != 0;
@ -4882,7 +4779,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xd: {
case 0xD: {
if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
instr->Bit(4) == 1) {
// vmul.f32 Qd, Qn, Qm
@ -4902,7 +4799,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
}
break;
}
case 0xe: {
case 0xE: {
if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
// vcge/vcgt.f32 Qd, Qm, Qn
bool ge = instr->Bit(21) == 0;
@ -5014,15 +4911,15 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if ((imm4 & 0x1) != 0) {
size = 8;
index = imm4 >> 1;
mask = 0xffu;
mask = 0xFFu;
} else if ((imm4 & 0x2) != 0) {
size = 16;
index = imm4 >> 2;
mask = 0xffffu;
mask = 0xFFFFu;
} else {
size = 32;
index = imm4 >> 3;
mask = 0xffffffffu;
mask = 0xFFFFFFFFu;
}
uint64_t d_data;
get_d_register(vm, &d_data);
@ -5275,7 +5172,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
NeonSize size = static_cast<NeonSize>(instr->Bits(19, 18));
if (instr->Bits(9, 6) == 0xd) {
if (instr->Bits(9, 6) == 0xD) {
// vabs<type>.<size> Qd, Qm
if (instr->Bit(10) != 0) {
// floating point (clear sign bits)
@ -5302,7 +5199,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
break;
}
}
} else if (instr->Bits(9, 6) == 0xf) {
} else if (instr->Bits(9, 6) == 0xF) {
// vneg<type>.<size> Qd, Qm (signed integer)
if (instr->Bit(10) != 0) {
// floating point (toggle sign bits)
@ -5561,7 +5458,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
break;
case 0xA:
case 0xB:
if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xF)) {
// pld: ignore instruction.
} else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
// dsb, dmb, isb: ignore instruction for now.
@ -5893,18 +5790,16 @@ void Simulator::CallInternal(byte* entry) {
set_register(r11, r11_val);
}

int32_t Simulator::Call(byte* entry, int argument_count, ...) {
va_list parameters;
va_start(parameters, argument_count);
intptr_t Simulator::CallImpl(byte* entry, int argument_count,
const intptr_t* arguments) {
// Set up arguments

// First four arguments passed in registers.
DCHECK_GE(argument_count, 4);
set_register(r0, va_arg(parameters, int32_t));
set_register(r1, va_arg(parameters, int32_t));
set_register(r2, va_arg(parameters, int32_t));
set_register(r3, va_arg(parameters, int32_t));
int reg_arg_count = std::min(4, argument_count);
if (reg_arg_count > 0) set_register(r0, arguments[0]);
if (reg_arg_count > 1) set_register(r1, arguments[1]);
if (reg_arg_count > 2) set_register(r2, arguments[2]);
if (reg_arg_count > 3) set_register(r3, arguments[3]);

// Remaining arguments passed on stack.
int original_stack = get_register(sp);
@ -5914,11 +5809,8 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
entry_stack &= -base::OS::ActivationFrameAlignment();
}
// Store remaining arguments on stack, from low to high memory.
intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
for (int i = 4; i < argument_count; i++) {
stack_argument[i - 4] = va_arg(parameters, int32_t);
}
va_end(parameters);
memcpy(reinterpret_cast<intptr_t*>(entry_stack), arguments + reg_arg_count,
(argument_count - reg_arg_count) * sizeof(*arguments));
set_register(sp, entry_stack);

CallInternal(entry);
@ -5927,12 +5819,10 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
CHECK_EQ(entry_stack, get_register(sp));
set_register(sp, original_stack);

int32_t result = get_register(r0);
return result;
return get_register(r0);
}

void Simulator::CallFP(byte* entry, double d0, double d1) {
int32_t Simulator::CallFPImpl(byte* entry, double d0, double d1) {
if (use_eabi_hardfloat()) {
set_d_register_from_double(0, d0);
set_d_register_from_double(1, d1);
@ -5941,13 +5831,7 @@ void Simulator::CallFP(byte* entry, double d0, double d1) {
set_register_pair_from_double(2, &d1);
}
CallInternal(entry);
}

int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
CallFP(entry, d0, d1);
int32_t result = get_register(r0);
return result;
return get_register(r0);
}
124
deps/v8/src/arm/simulator-arm.h
vendored
124
deps/v8/src/arm/simulator-arm.h
vendored
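The new templated Call wrapper packs its arguments into a flat intptr_t array before CallImpl copies them into r0-r3 and the stack, replacing the old va_list plumbing. A minimal standalone sketch of that packing (hypothetical names, integral arguments only, at least one argument required; the real VariadicCall helper in src/simulator-base.h also handles pointer arguments):

#include <cstdint>
#include <iostream>

// Hypothetical stand-in for Simulator::CallImpl: consumes a flat argument
// array, the way the simulator copies entries into r0-r3 and the stack.
std::intptr_t CallImpl(int argument_count, const std::intptr_t* arguments) {
  std::intptr_t sum = 0;  // pretend "generated code" that sums its arguments
  for (int i = 0; i < argument_count; ++i) sum += arguments[i];
  return sum;
}

// Pack a variadic argument list into an array, in the spirit of V8's
// VariadicCall helper, then narrow the raw return value.
template <typename Return, typename... Args>
Return Call(Args... args) {
  std::intptr_t packed[] = {static_cast<std::intptr_t>(args)...};
  return static_cast<Return>(
      CallImpl(static_cast<int>(sizeof...(Args)), packed));
}

int main() {
  std::cout << Call<std::int32_t>(1, 2, 3, 4, 5) << "\n";  // prints 15
}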
124 deps/v8/src/arm/simulator-arm.h vendored
@ -2,11 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 // Declares a Simulator for ARM instructions if we are not generating a native
 // ARM binary. This Simulator allows us to run and debug ARM code generation on
 // regular desktop machines.
-// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// V8 calls into generated code by using the GeneratedCode class,
 // which will start execution in the Simulator or forwards to the real entry
 // on a ARM HW platform.
 
@ -18,56 +17,13 @@
 #include "src/base/platform/mutex.h"
 #include "src/boxed-float.h"
 
-#if !defined(USE_SIMULATOR)
-// Running without a simulator on a native arm platform.
-
-namespace v8 {
-namespace internal {
-
-// When running without a simulator we call the entry directly.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
-  (entry(p0, p1, p2, p3, p4))
-
-typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*, int*,
-                                  int, Address, int, Isolate*);
-
-// Call the generated regexp code directly. The code at the entry address
-// should act as a function matching the type arm_regexp_matcher.
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
-                                   p7, p8)                                     \
-  (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7, p8))
-
-// The stack limit beyond which we will throw stack overflow errors in
-// generated code. Because generated code on arm uses the C stack, we
-// just use the C stack limit.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
-  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
-                                            uintptr_t c_limit) {
-    USE(isolate);
-    return c_limit;
-  }
-
-  static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
-                                            uintptr_t try_catch_address) {
-    USE(isolate);
-    return try_catch_address;
-  }
-
-  static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
-    USE(isolate);
-  }
-};
-
-} // namespace internal
-} // namespace v8
-
-#else // !defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR)
 // Running with a simulator.
 
 #include "src/arm/constants-arm.h"
 #include "src/assembler.h"
 #include "src/base/hashmap.h"
+#include "src/simulator-base.h"
 
 namespace v8 {
 namespace internal {
@ -102,8 +58,7 @@ class CachePage {
   char validity_map_[kValidityMapSize];  // One byte per line.
 };
 
-
-class Simulator {
+class Simulator : public SimulatorBase {
  public:
   friend class ArmDebugger;
   enum Register {
@ -134,7 +89,7 @@ class Simulator {
 
   // The currently executing Simulator instance. Potentially there can be one
   // for each native thread.
-  static Simulator* current(v8::internal::Isolate* isolate);
+  V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
 
   // Accessors for register state. Reading the pc value adheres to the ARM
   // architecture specification and is off by a 8 from the currently executing
@ -203,18 +158,16 @@ class Simulator {
   // Executes ARM instructions until the PC reaches end_sim_pc.
   void Execute();
 
-  // Call on program start.
-  static void Initialize(Isolate* isolate);
+  template <typename Return, typename... Args>
+  Return Call(byte* entry, Args... args) {
+    return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+  }
 
-  static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);
-
-  // V8 generally calls into generated JS code with 5 parameters and into
-  // generated RegExp code with 7 parameters. This is a convenience function,
-  // which sets up the simulator state and grabs the result on return.
-  int32_t Call(byte* entry, int argument_count, ...);
   // Alternative: call a 2-argument double function.
-  void CallFP(byte* entry, double d0, double d1);
-  int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+  template <typename Return>
+  Return CallFP(byte* entry, double d0, double d1) {
+    return ConvertReturn<Return>(CallFPImpl(entry, d0, d1));
+  }
 
   // Push an address onto the JS stack.
   uintptr_t PushAddress(uintptr_t address);
@ -226,6 +179,9 @@ class Simulator {
   void set_last_debugger_input(char* input);
   char* last_debugger_input() { return last_debugger_input_; }
 
+  // Redirection support.
+  static void SetRedirectInstruction(Instruction* instruction);
+
   // ICache checking.
   static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
                           size_t size);
@ -255,6 +211,10 @@ class Simulator {
     end_sim_pc = -2
   };
 
+  V8_EXPORT_PRIVATE intptr_t CallImpl(byte* entry, int argument_count,
+                                      const intptr_t* arguments);
+  intptr_t CallFPImpl(byte* entry, double d0, double d1);
+
   // Unsupported instructions use Format to print an error and stop execution.
   void Format(Instruction* instr, const char* format);
 
@ -369,11 +329,6 @@ class Simulator {
   static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
                                  void* page);
 
-  // Runtime call support. Uses the isolate in a thread-safe way.
-  static void* RedirectExternalReference(
-      Isolate* isolate, void* external_function,
-      v8::internal::ExternalReference::Type type);
-
   // Handle arguments and return value for runtime FP functions.
   void GetFpArgs(double* x, double* y, int32_t* z);
   void SetFpResult(const double& result);
@ -541,45 +496,8 @@ class Simulator {
   static base::LazyInstance<GlobalMonitor>::type global_monitor_;
 };
 
-
-// When running with the simulator transition into simulated execution at this
-// point.
-#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
-  reinterpret_cast<Object*>(Simulator::current(isolate)->Call(  \
-      FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
-
-#define CALL_GENERATED_FP_INT(isolate, entry, p0, p1) \
-  Simulator::current(isolate)->CallFPReturnsInt(FUNCTION_ADDR(entry), p0, p1)
-
-#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
-                                   p7, p8)                                     \
-  Simulator::current(isolate)->Call(entry, 9, p0, p1, p2, p3, p4, p5, p6, p7,  \
-                                    p8)
-
-// The simulator has its own stack. Thus it has a different stack limit from
-// the C-based native code. The JS-based limit normally points near the end of
-// the simulator stack. When the C-based limit is exhausted we reflect that by
-// lowering the JS-based limit as well, to make stack checks trigger.
-class SimulatorStack : public v8::internal::AllStatic {
- public:
-  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
-                                            uintptr_t c_limit) {
-    return Simulator::current(isolate)->StackLimit(c_limit);
-  }
-
-  static inline uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
-                                            uintptr_t try_catch_address) {
-    Simulator* sim = Simulator::current(isolate);
-    return sim->PushAddress(try_catch_address);
-  }
-
-  static inline void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
-    Simulator::current(isolate)->PopAddress();
-  }
-};
-
 } // namespace internal
 } // namespace v8
 
-#endif // !defined(USE_SIMULATOR)
+#endif // defined(USE_SIMULATOR)
 #endif // V8_ARM_SIMULATOR_ARM_H_
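The new CallFP<Return> wrapper funnels every simulated call through one raw integer return value and converts it afterwards. A hedged sketch of that conversion step (integral and void returns only; the real ConvertReturn in src/simulator-base.h covers more cases):

#include <cassert>
#include <cstdint>

// Reinterpret the simulator's raw return value for the requested type.
template <typename Return>
Return ConvertReturn(std::intptr_t raw) {
  return static_cast<Return>(raw);
}

template <>
void ConvertReturn<void>(std::intptr_t) {}  // a void call discards the value

int main() {
  assert(ConvertReturn<std::int32_t>(42) == 42);
  ConvertReturn<void>(42);  // compiles and does nothing
  return 0;
}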
30 deps/v8/src/arm64/assembler-arm64-inl.h vendored
@ -532,12 +532,6 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
 }
 
 
-Address Assembler::target_address_at(Address pc, Code* code) {
-  Address constant_pool = code ? code->constant_pool() : nullptr;
-  return target_address_at(pc, constant_pool);
-}
-
-
 Address Assembler::target_address_from_return_address(Address pc) {
   // Returns the address of the call target from the return address that will
   // be returned to after a call.
@ -615,14 +609,6 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
 }
 
 
-void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
-                                      Address target,
-                                      ICacheFlushMode icache_flush_mode) {
-  Address constant_pool = code ? code->constant_pool() : nullptr;
-  set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
-}
-
-
 int RelocInfo::target_address_size() {
   return kPointerSize;
 }
@ -630,7 +616,7 @@ int RelocInfo::target_address_size() {
 
 Address RelocInfo::target_address() {
   DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
-  return Assembler::target_address_at(pc_, host_);
+  return Assembler::target_address_at(pc_, constant_pool_);
 }
 
 Address RelocInfo::target_address_address() {
@ -647,21 +633,21 @@ Address RelocInfo::constant_pool_entry_address() {
 
 HeapObject* RelocInfo::target_object() {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return HeapObject::cast(
-      reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_)));
+  return HeapObject::cast(reinterpret_cast<Object*>(
+      Assembler::target_address_at(pc_, constant_pool_)));
 }
 
 Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  return Handle<HeapObject>(
-      reinterpret_cast<HeapObject**>(Assembler::target_address_at(pc_, host_)));
+  return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
+      Assembler::target_address_at(pc_, constant_pool_)));
 }
 
 void RelocInfo::set_target_object(HeapObject* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
   DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
+  Assembler::set_target_address_at(target->GetIsolate(), pc_, constant_pool_,
                                    reinterpret_cast<Address>(target),
                                    icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
@ -674,7 +660,7 @@ void RelocInfo::set_target_object(HeapObject* target,
 
 Address RelocInfo::target_external_reference() {
   DCHECK(rmode_ == EXTERNAL_REFERENCE);
-  return Assembler::target_address_at(pc_, host_);
+  return Assembler::target_address_at(pc_, constant_pool_);
 }
 
 
@ -711,7 +697,7 @@ void RelocInfo::WipeOut(Isolate* isolate) {
   if (IsInternalReference(rmode_)) {
     Memory::Address_at(pc_) = nullptr;
   } else {
-    Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
+    Assembler::set_target_address_at(isolate, pc_, constant_pool_, nullptr);
   }
 }
 
56
deps/v8/src/arm64/assembler-arm64.cc
vendored
56
deps/v8/src/arm64/assembler-arm64.cc
vendored
56 deps/v8/src/arm64/assembler-arm64.cc vendored
@ -147,9 +147,6 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
   // is a caller-saved register according to the procedure call standard.
   list.Combine(18);
 
-  // Drop jssp as the stack pointer doesn't need to be included.
-  list.Remove(28);
-
   // Add the link register (x30) to the safepoint list.
   list.Combine(30);
 
@ -186,7 +183,8 @@ uint32_t RelocInfo::embedded_size() const {
 
 void RelocInfo::set_embedded_address(Isolate* isolate, Address address,
                                      ICacheFlushMode flush_mode) {
-  Assembler::set_target_address_at(isolate, pc_, host_, address, flush_mode);
+  Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
+                                   flush_mode);
 }
 
 void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
@ -2636,7 +2634,7 @@ Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
    } else {
      // The immediate post index addressing mode is indicated by rm = 31.
      // The immediate is implied by the number of vector registers used.
-      addr_field |= (0x1f << Rm_offset);
+      addr_field |= (0x1F << Rm_offset);
    }
  } else {
    DCHECK(addr.IsImmediateOffset() && (addr.offset() == 0));
@ -3003,7 +3001,7 @@ void Assembler::fmov(const VRegister& vd, double imm) {
   } else {
     DCHECK(vd.Is2D());
     Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
-    Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd));
+    Emit(NEON_Q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
   }
 }
 
@ -3015,7 +3013,7 @@ void Assembler::fmov(const VRegister& vd, float imm) {
     DCHECK(vd.Is2S() | vd.Is4S());
     Instr op = NEONModifiedImmediate_MOVI;
     Instr q = vd.Is4S() ? NEON_Q : 0;
-    Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xf) | Rd(vd));
+    Emit(q | op | ImmNEONFP(imm) | NEONCmode(0xF) | Rd(vd));
   }
 }
 
@ -3596,15 +3594,15 @@ void Assembler::movi(const VRegister& vd, const uint64_t imm, Shift shift,
     DCHECK_EQ(shift_amount, 0);
     int imm8 = 0;
     for (int i = 0; i < 8; ++i) {
-      int byte = (imm >> (i * 8)) & 0xff;
-      DCHECK((byte == 0) || (byte == 0xff));
-      if (byte == 0xff) {
+      int byte = (imm >> (i * 8)) & 0xFF;
+      DCHECK((byte == 0) || (byte == 0xFF));
+      if (byte == 0xFF) {
         imm8 |= (1 << i);
       }
     }
     Instr q = vd.Is2D() ? NEON_Q : 0;
     Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
-         ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
+         ImmNEONabcdefgh(imm8) | NEONCmode(0xE) | Rd(vd));
   } else if (shift == LSL) {
     NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
                             NEONModifiedImmediate_MOVI);
@ -3953,7 +3951,7 @@ uint32_t Assembler::FPToImm8(double imm) {
   //       bit6: 0b00.0000
   uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
   // bit5_to_0: 00cd.efgh
-  uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+  uint64_t bit5_to_0 = (bits >> 48) & 0x3F;
 
   return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
 }
@ -3971,7 +3969,7 @@ void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
     // Check that the top 32 bits are zero (a positive 32-bit number) or top
     // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
     DCHECK(((imm >> kWRegSizeInBits) == 0) ||
-           ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
+           ((imm >> (kWRegSizeInBits - 1)) == 0x1FFFFFFFF));
     imm &= kWRegMask;
   }
 
@ -3984,16 +3982,16 @@ void Assembler::MoveWide(const Register& rd, uint64_t imm, int shift,
     // Calculate a new immediate and shift combination to encode the immediate
     // argument.
     shift = 0;
-    if ((imm & ~0xffffUL) == 0) {
+    if ((imm & ~0xFFFFUL) == 0) {
       // Nothing to do.
-    } else if ((imm & ~(0xffffUL << 16)) == 0) {
+    } else if ((imm & ~(0xFFFFUL << 16)) == 0) {
       imm >>= 16;
       shift = 1;
-    } else if ((imm & ~(0xffffUL << 32)) == 0) {
+    } else if ((imm & ~(0xFFFFUL << 32)) == 0) {
       DCHECK(rd.Is64Bits());
       imm >>= 32;
       shift = 2;
-    } else if ((imm & ~(0xffffUL << 48)) == 0) {
+    } else if ((imm & ~(0xFFFFUL << 48)) == 0) {
       DCHECK(rd.Is64Bits());
       imm >>= 48;
       shift = 3;
@ -4247,7 +4245,7 @@ void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd, const int imm8,
   DCHECK(is_uint8(imm8));
 
   int cmode_0 = (shift_amount >> 4) & 1;
-  int cmode = 0xc | cmode_0;
+  int cmode = 0xC | cmode_0;
 
   Instr q = vd.IsQ() ? NEON_Q : 0;
 
@ -4343,7 +4341,7 @@ void Assembler::DataProcExtendedRegister(const Register& rd,
 
 bool Assembler::IsImmAddSub(int64_t immediate) {
   return is_uint12(immediate) ||
-         (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+         (is_uint12(immediate >> 12) && ((immediate & 0xFFF) == 0));
 }
 
 void Assembler::LoadStore(const CPURegister& rt,
@ -4526,7 +4524,7 @@ bool Assembler::IsImmLogical(uint64_t value,
     clz_a = CountLeadingZeros(a, kXRegSizeInBits);
     int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
     d = clz_a - clz_c;
-    mask = ((V8_UINT64_C(1) << d) - 1);
+    mask = ((uint64_t{1} << d) - 1);
    out_n = 0;
  } else {
    // Handle degenerate cases.
@ -4547,7 +4545,7 @@ bool Assembler::IsImmLogical(uint64_t value,
      // the general case above, and set the N bit in the output.
      clz_a = CountLeadingZeros(a, kXRegSizeInBits);
      d = 64;
-      mask = ~V8_UINT64_C(0);
+      mask = ~uint64_t{0};
      out_n = 1;
    }
  }
@ -4596,7 +4594,7 @@ bool Assembler::IsImmLogical(uint64_t value,
 
   // Count the set bits in our basic stretch. The special case of clz(0) == -1
   // makes the answer come out right for stretches that reach the very top of
-  // the word (e.g. numbers like 0xffffc00000000000).
+  // the word (e.g. numbers like 0xFFFFC00000000000).
   int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
   int s = clz_a - clz_b;
 
@ -4628,7 +4626,7 @@ bool Assembler::IsImmLogical(uint64_t value,
   //
   // So we 'or' (-d << 1) with our computed s to form imms.
   *n = out_n;
-  *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
+  *imm_s = ((-d << 1) | (s - 1)) & 0x3F;
   *imm_r = r;
 
   return true;
@ -4645,13 +4643,13 @@ bool Assembler::IsImmFP32(float imm) {
   // aBbb.bbbc.defg.h000.0000.0000.0000.0000
   uint32_t bits = bit_cast<uint32_t>(imm);
   // bits[19..0] are cleared.
-  if ((bits & 0x7ffff) != 0) {
+  if ((bits & 0x7FFFF) != 0) {
     return false;
   }
 
   // bits[29..25] are all set or all cleared.
-  uint32_t b_pattern = (bits >> 16) & 0x3e00;
-  if (b_pattern != 0 && b_pattern != 0x3e00) {
+  uint32_t b_pattern = (bits >> 16) & 0x3E00;
+  if (b_pattern != 0 && b_pattern != 0x3E00) {
     return false;
   }
 
@ -4670,13 +4668,13 @@ bool Assembler::IsImmFP64(double imm) {
   // 0000.0000.0000.0000.0000.0000.0000.0000
   uint64_t bits = bit_cast<uint64_t>(imm);
   // bits[47..0] are cleared.
-  if ((bits & 0xffffffffffffL) != 0) {
+  if ((bits & 0xFFFFFFFFFFFFL) != 0) {
     return false;
   }
 
   // bits[61..54] are all set or all cleared.
-  uint32_t b_pattern = (bits >> 48) & 0x3fc0;
-  if (b_pattern != 0 && b_pattern != 0x3fc0) {
+  uint32_t b_pattern = (bits >> 48) & 0x3FC0;
+  if (b_pattern != 0 && b_pattern != 0x3FC0) {
     return false;
   }
 
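Only the hex casing changes in MoveWide, but the quadrant test it performs is worth a worked instance. A standalone sketch of the same (imm & ~(0xFFFFUL << 16*n)) == 0 selection (hypothetical helper, not the V8 function):

#include <cassert>
#include <cstdint>

// Pick the 16-bit quadrant for a movz-style encoding, mirroring the shift
// selection in Assembler::MoveWide.
bool PickMovzShift(std::uint64_t imm, int* shift, std::uint16_t* imm16) {
  for (int n = 0; n < 4; ++n) {
    if ((imm & ~(std::uint64_t{0xFFFF} << (16 * n))) == 0) {
      *shift = n;
      *imm16 = static_cast<std::uint16_t>(imm >> (16 * n));
      return true;
    }
  }
  return false;  // needs more than one movz/movk
}

int main() {
  int shift;
  std::uint16_t imm16;
  // 0x12340000 is 0x1234 shifted into the second quadrant.
  assert(PickMovzShift(0x12340000, &shift, &imm16) && shift == 1 &&
         imm16 == 0x1234);
  // Bits in three quadrants cannot be encoded with a single movz.
  assert(!PickMovzShift(0x100010001ULL, &shift, &imm16));
  return 0;
}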
15 deps/v8/src/arm64/assembler-arm64.h vendored
@ -39,7 +39,8 @@ namespace internal {
 #define ALLOCATABLE_GENERAL_REGISTERS(R)                  \
   R(x0)  R(x1)  R(x2)  R(x3)  R(x4)  R(x5)  R(x6)  R(x7)  \
   R(x8)  R(x9)  R(x10) R(x11) R(x12) R(x13) R(x14) R(x15) \
-  R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27)
+  R(x18) R(x19) R(x20) R(x21) R(x22) R(x23) R(x24) R(x27) \
+  R(x28)
 
 #define FLOAT_REGISTERS(V)                                \
   V(s0)  V(s1)  V(s2)  V(s3)  V(s4)  V(s5)  V(s6)  V(s7)  \
@ -295,6 +296,7 @@ class Register : public CPURegister {
 static_assert(IS_TRIVIALLY_COPYABLE(Register),
               "Register can efficiently be passed by value");
 
+constexpr bool kPadArguments = true;
 constexpr bool kSimpleFPAliasing = true;
 constexpr bool kSimdMaskRegisters = false;
 
@ -479,13 +481,6 @@ ALIAS_REGISTER(Register, root, x26);
 ALIAS_REGISTER(Register, rr, x26);
 // Context pointer register.
 ALIAS_REGISTER(Register, cp, x27);
-// We use a register as a JS stack pointer to overcome the restriction on the
-// architectural SP alignment.
-// We chose x28 because it is contiguous with the other specific purpose
-// registers.
-STATIC_ASSERT(kJSSPCode == 28);
-ALIAS_REGISTER(Register, jssp, x28);
-ALIAS_REGISTER(Register, wjssp, w28);
 ALIAS_REGISTER(Register, fp, x29);
 ALIAS_REGISTER(Register, lr, x30);
 ALIAS_REGISTER(Register, xzr, x31);
@ -1001,10 +996,6 @@ class Assembler : public AssemblerBase {
   inline static void set_target_address_at(
       Isolate* isolate, Address pc, Address constant_pool, Address target,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-  static inline Address target_address_at(Address pc, Code* code);
-  static inline void set_target_address_at(
-      Isolate* isolate, Address pc, Code* code, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
 
   // Return the code target address at a call site from the return address of
   // that call in the instruction stream.
152 deps/v8/src/arm64/code-stubs-arm64.cc vendored
@ -30,7 +30,7 @@ namespace internal {
 
 void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
   __ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
-  __ Str(x1, MemOperand(jssp, x5));
+  __ Str(x1, MemOperand(__ StackPointer(), x5));
   __ Push(x1, x2);
   __ Add(x0, x0, Operand(3));
   __ TailCallRuntime(Runtime::kNewArray);
@ -42,7 +42,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
   Register result = destination();
 
   DCHECK(result.Is64Bits());
-  DCHECK(jssp.Is(masm->StackPointer()));
 
   UseScratchRegisterScope temps(masm);
   Register scratch1 = temps.AcquireX();
@ -75,7 +74,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
   if (masm->emit_debug_code()) {
     __ Cmp(exponent, HeapNumber::kExponentBias + 63);
     // Exponents less than this should have been handled by the Fcvt case.
-    __ Check(ge, kUnexpectedValue);
+    __ Check(ge, AbortReason::kUnexpectedValue);
   }
 
   // Isolate the mantissa bits, and set the implicit '1'.
@ -100,8 +99,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
 
 void MathPowStub::Generate(MacroAssembler* masm) {
   // Stack on entry:
-  // jssp[0]: Exponent (as a tagged value).
-  // jssp[1]: Base (as a tagged value).
+  // sp[0]: Exponent (as a tagged value).
+  // sp[1]: Base (as a tagged value).
   //
   // The (tagged) result will be returned in x0, as a heap number.
 
@ -276,15 +275,14 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   // The stack on entry holds the arguments and the receiver, with the receiver
   // at the highest address:
   //
-  // jssp]argc-1]: receiver
-  // jssp[argc-2]: arg[argc-2]
+  // sp]argc-1]: receiver
+  // sp[argc-2]: arg[argc-2]
   //          ...            ...
-  // jssp[1]:      arg[1]
-  // jssp[0]:      arg[0]
+  // sp[1]:      arg[1]
+  // sp[0]:      arg[0]
   //
   // The arguments are in reverse order, so that arg[argc-2] is actually the
   // first argument to the target function and arg[0] is the last.
-  DCHECK(jssp.Is(__ StackPointer()));
   const Register& argc_input = x0;
   const Register& target_input = x1;
 
@ -385,7 +383,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
     __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
     __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSize)));
     __ Cmp(temp, x12);
-    __ Check(eq, kReturnAddressNotFoundInFrame);
+    __ Check(eq, AbortReason::kReturnAddressNotFoundInFrame);
   }
 
   // Call the builtin.
@ -415,8 +413,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   __ Peek(argc, 2 * kPointerSize);
   __ Peek(target, 3 * kPointerSize);
 
-  __ LeaveExitFrame(save_doubles(), x10);
-  DCHECK(jssp.Is(__ StackPointer()));
+  __ LeaveExitFrame(save_doubles(), x10, x9);
   if (!argv_in_register()) {
     // Drop the remaining stack slots and return from the stub.
     __ DropArguments(x11);
@ -424,10 +421,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   __ AssertFPCRState();
   __ Ret();
 
-  // The stack pointer is still csp if we aren't returning, and the frame
-  // hasn't changed (except for the return address).
-  __ SetStackPointer(csp);
-
   // Handling of exception.
   __ Bind(&exception_returned);
 
@ -453,18 +446,16 @@ void CEntryStub::Generate(MacroAssembler* masm) {
     __ CallCFunction(find_handler, 3);
   }
 
-  // We didn't execute a return case, so the stack frame hasn't been updated
-  // (except for the return address slot). However, we don't need to initialize
-  // jssp because the throw method will immediately overwrite it when it
-  // unwinds the stack.
-  __ SetStackPointer(jssp);
-
   // Retrieve the handler context, SP and FP.
   __ Mov(cp, Operand(pending_handler_context_address));
   __ Ldr(cp, MemOperand(cp));
-  __ Mov(jssp, Operand(pending_handler_sp_address));
-  __ Ldr(jssp, MemOperand(jssp));
-  __ Mov(csp, jssp);
+  {
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.AcquireX();
+    __ Mov(scratch, Operand(pending_handler_sp_address));
+    __ Ldr(scratch, MemOperand(scratch));
+    __ Mov(csp, scratch);
+  }
   __ Mov(fp, Operand(pending_handler_fp_address));
   __ Ldr(fp, MemOperand(fp));
 
@ -481,9 +472,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
   __ Br(x10);
 }
 
-
 // This is the entry point from C++. 5 arguments are provided in x0-x4.
-// See use of the CALL_GENERATED_CODE macro for example in src/execution.cc.
+// See use of the JSEntryFunction for example in src/execution.cc.
 // Input:
 //   x0: code entry.
 //   x1: function.
@ -493,7 +483,6 @@ void CEntryStub::Generate(MacroAssembler* masm) {
 // Output:
 //   x0: result.
 void JSEntryStub::Generate(MacroAssembler* masm) {
-  DCHECK(jssp.Is(__ StackPointer()));
   Register code_entry = x0;
 
   // Enable instruction instrumentation. This only works on the simulator, and
@ -502,21 +491,16 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
 
   Label invoke, handler_entry, exit;
 
-  // Push callee-saved registers and synchronize the system stack pointer (csp)
-  // and the JavaScript stack pointer (jssp).
-  //
-  // We must not write to jssp until after the PushCalleeSavedRegisters()
-  // call, since jssp is itself a callee-saved register.
-  __ SetStackPointer(csp);
   __ PushCalleeSavedRegisters();
-  __ Mov(jssp, csp);
-  __ SetStackPointer(jssp);
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
   // Set up the reserved register for 0.0.
   __ Fmov(fp_zero, 0.0);
 
   // Initialize the root array register
   __ InitializeRootRegister();
 
   // Build an entry frame (see layout below).
   StackFrame::Type marker = type();
   int64_t bad_frame_pointer = -1L;  // Bad frame pointer to fail if it is used.
@ -527,7 +511,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
 
   __ Push(x13, x12, xzr, x10);
   // Set up fp.
-  __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset);
+  __ Sub(fp, __ StackPointer(), EntryFrameConstants::kCallerFPOffset);
 
   // Push the JS entry frame marker. Also set js_entry_sp if this is the
   // outermost JS call.
@ -546,14 +530,15 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
   __ Str(fp, MemOperand(x10));
 
   __ Bind(&done);
-  __ Push(x12);
+  __ Push(x12, padreg);
 
   // The frame set up looks like this:
-  // jssp[0] : JS entry frame marker.
-  // jssp[1] : C entry FP.
-  // jssp[2] : stack frame marker.
-  // jssp[3] : stack frame marker.
-  // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+  // sp[0] : padding.
+  // sp[1] : JS entry frame marker.
+  // sp[2] : C entry FP.
+  // sp[3] : stack frame marker.
+  // sp[4] : stack frame marker.
+  // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
 
   // Jump to a faked try block that does the invoke, with a faked catch
   // block that sets the pending exception.
@ -583,8 +568,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
   __ Bind(&invoke);
 
   // Push new stack handler.
-  DCHECK(jssp.Is(__ StackPointer()));
-  static_assert(StackHandlerConstants::kSize == 1 * kPointerSize,
+  static_assert(StackHandlerConstants::kSize == 2 * kPointerSize,
                 "Unexpected offset for StackHandlerConstants::kSize");
   static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
                 "Unexpected offset for StackHandlerConstants::kNextOffset");
@ -592,10 +576,15 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
   // Link the current handler as the next handler.
   __ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
   __ Ldr(x10, MemOperand(x11));
-  __ Push(x10);
+  __ Push(padreg, x10);
 
   // Set this new handler as the current one.
-  __ Str(jssp, MemOperand(x11));
+  {
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.AcquireX();
+    __ Mov(scratch, __ StackPointer());
+    __ Str(scratch, MemOperand(x11));
+  }
 
   // If an exception not caught by another handler occurs, this handler
   // returns control to the code after the B(&invoke) above, which
@ -612,37 +601,32 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
   //           x2: receiver.
   //           x3: argc.
   //           x4: argv.
-  if (type() == StackFrame::CONSTRUCT_ENTRY) {
-    __ Call(BUILTIN_CODE(isolate(), JSConstructEntryTrampoline),
-            RelocInfo::CODE_TARGET);
-  } else {
-    __ Call(BUILTIN_CODE(isolate(), JSEntryTrampoline), RelocInfo::CODE_TARGET);
-  }
+  __ Call(EntryTrampoline(), RelocInfo::CODE_TARGET);
 
   // Pop the stack handler and unlink this frame from the handler chain.
   static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize,
                 "Unexpected offset for StackHandlerConstants::kNextOffset");
-  __ Pop(x10);
+  __ Pop(x10, padreg);
   __ Mov(x11, ExternalReference(IsolateAddressId::kHandlerAddress, isolate()));
-  __ Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
+  __ Drop(StackHandlerConstants::kSlotCount - 2);
   __ Str(x10, MemOperand(x11));
 
   __ Bind(&exit);
   // x0 holds the result.
   // The stack pointer points to the top of the entry frame pushed on entry from
   // C++ (at the beginning of this stub):
-  // jssp[0] : JS entry frame marker.
-  // jssp[1] : C entry FP.
-  // jssp[2] : stack frame marker.
-  // jssp[3] : stack frmae marker.
-  // jssp[4] : bad frame pointer 0xfff...ff <- fp points here.
+  // sp[0] : padding.
+  // sp[1] : JS entry frame marker.
+  // sp[2] : C entry FP.
+  // sp[3] : stack frame marker.
+  // sp[4] : stack frame marker.
+  // sp[5] : bad frame pointer 0xFFF...FF <- fp points here.
 
   // Check if the current stack frame is marked as the outermost JS frame.
   Label non_outermost_js_2;
   {
     Register c_entry_fp = x11;
-    __ Pop(x10, c_entry_fp);
+    __ PeekPair(x10, c_entry_fp, 1 * kPointerSize);
     __ Cmp(x10, StackFrame::OUTERMOST_JSENTRY_FRAME);
     __ B(ne, &non_outermost_js_2);
     __ Mov(x12, ExternalReference(js_entry_sp));
@ -656,21 +640,17 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
   }
 
   // Reset the stack to the callee saved registers.
-  __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
+  static_assert(EntryFrameConstants::kFixedFrameSize % (2 * kPointerSize) == 0,
+                "Size of entry frame is not a multiple of 16 bytes");
+  __ Drop(EntryFrameConstants::kFixedFrameSize / kPointerSize);
   // Restore the callee-saved registers and return.
-  DCHECK(jssp.Is(__ StackPointer()));
-  __ Mov(csp, jssp);
-  __ SetStackPointer(csp);
   __ PopCalleeSavedRegisters();
-  // After this point, we must not modify jssp because it is a callee-saved
-  // register which we have just restored.
   __ Ret();
 }
 
-// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
-// a "Push lr" instruction, followed by a call.
+// The entry hook is a Push (stp) instruction, followed by a call.
 static const unsigned int kProfileEntryHookCallSize =
-    Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
+    (1 * kInstructionSize) + Assembler::kCallSizeWithRelocation;
 
 void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
                                                      Zone* zone) {
@ -748,14 +728,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
 
 
 void DirectCEntryStub::Generate(MacroAssembler* masm) {
-  // When calling into C++ code the stack pointer must be csp.
-  // Therefore this code must use csp for peek/poke operations when the
-  // stub is generated. When the stub is called
-  // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
-  // and configure the stack pointer *before* doing the call.
-  const Register old_stack_pointer = __ StackPointer();
-  __ SetStackPointer(csp);
-
   // Put return address on the stack (accessible to GC through exit frame pc).
   __ Poke(lr, 0);
   // Call the C++ function.
@ -764,8 +736,6 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
   __ Peek(lr, 0);
   __ AssertFPCRState();
   __ Ret();
-
-  __ SetStackPointer(old_stack_pointer);
 }
 
 void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
@ -806,7 +776,7 @@ static void CreateArrayDispatch(MacroAssembler* masm,
     }
 
     // If we reached this point there is a problem.
-    __ Abort(kUnexpectedElementsKindInArrayConstructor);
+    __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
 
   } else {
     UNREACHABLE();
@ -856,7 +826,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
     __ Ldr(x10, FieldMemOperand(allocation_site, 0));
     __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
                      &normal_sequence);
-    __ Assert(eq, kExpectedAllocationSite);
+    __ Assert(eq, AbortReason::kExpectedAllocationSite);
   }
 
   // Save the resulting elements kind in type info. We can't just store 'kind'
@ -884,7 +854,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
   }
 
   // If we reached this point there is a problem.
-  __ Abort(kUnexpectedElementsKindInArrayConstructor);
+  __ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
 } else {
   UNREACHABLE();
 }
@ -972,7 +942,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
     __ JumpIfSmi(x10, &unexpected_map);
     __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
     __ Bind(&unexpected_map);
-    __ Abort(kUnexpectedInitialMapForArrayFunction);
+    __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
     __ Bind(&map_ok);
 
   // We should either have undefined in the allocation_site register or a
@ -1069,7 +1039,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
    __ JumpIfSmi(x10, &unexpected_map);
    __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
    __ Bind(&unexpected_map);
-    __ Abort(kUnexpectedInitialMapForArrayFunction);
+    __ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
    __ Bind(&map_ok);
  }
 
@ -1085,7 +1055,9 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
    Label done;
    __ Cmp(x3, PACKED_ELEMENTS);
    __ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
-    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+    __ Assert(
+        eq,
+        AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
  }
 
  Label fast_elements_case;
@ -1202,7 +1174,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
  if (__ emit_debug_code()) {
    __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
    __ Cmp(w1, level_reg);
-    __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+    __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall);
  }
  __ Sub(level_reg, level_reg, 1);
  __ Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
@ -1218,7 +1190,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
  __ Peek(x21, (spill_offset + 2) * kXRegSize);
  __ Peek(x22, (spill_offset + 3) * kXRegSize);
 
-  __ LeaveExitFrame(false, x1);
+  __ LeaveExitFrame(false, x1, x5);
 
  // Check if the function scheduled an exception.
  __ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
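The recurring padreg pushes above exist because, with jssp gone, stubs run directly on the architectural csp, which must stay 16-byte aligned at every push and pop. A toy model of that invariant (hypothetical, 8-byte slots; not V8 code):

#include <cassert>
#include <cstdint>
#include <vector>

// Every push moves the stack by a whole pair of 8-byte slots, the way
// __ Push(padreg, x10) pairs a padding slot with a single value.
class ModelStack {
 public:
  void PushPair(std::uintptr_t a, std::uintptr_t b) {
    slots_.push_back(a);
    slots_.push_back(b);
    assert((slots_.size() * 8) % 16 == 0);  // csp alignment invariant
  }
  std::size_t size() const { return slots_.size(); }

 private:
  std::vector<std::uintptr_t> slots_;
};

int main() {
  ModelStack stack;
  const std::uintptr_t kPadreg = 0;     // padding, never read back
  stack.PushPair(kPadreg, 0xDEADBEEF);  // one value still costs two slots
  assert(stack.size() == 2);
  return 0;
}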
1 deps/v8/src/arm64/constants-arm64.h vendored
@ -101,7 +101,6 @@ const int kIp1Code = 17;
 const int kFramePointerRegCode = 29;
 const int kLinkRegCode = 30;
 const int kZeroRegCode = 31;
-const int kJSSPCode = 28;
 const int kSPRegInternalCode = 63;
 const unsigned kRegCodeMask = 0x1f;
 const unsigned kShiftAmountWRegMask = 0x1f;
2 deps/v8/src/arm64/cpu-arm64.cc vendored
@ -31,7 +31,7 @@ class CacheLineSizes {
   uint32_t ExtractCacheLineSize(int cache_line_size_shift) const {
     // The cache type register holds the size of cache lines in words as a
     // power of two.
-    return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xf);
+    return 4 << ((cache_type_register_ >> cache_line_size_shift) & 0xF);
   }
 
   uint32_t cache_type_register_;
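The cache-type register field holds log2 of the line size in 4-byte words, so the expression shifts 4 by the field to get bytes. A worked instance (values hypothetical):

#include <cassert>
#include <cstdint>

// Mirrors CacheLineSizes::ExtractCacheLineSize from the hunk above.
std::uint32_t ExtractCacheLineSize(std::uint32_t cache_type_register,
                                   int shift) {
  return 4 << ((cache_type_register >> shift) & 0xF);
}

int main() {
  // A field value of 4 means 2^4 words = 16 words = 64 bytes per line.
  assert(ExtractCacheLineSize(4u << 16, 16) == 64);
  return 0;
}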
14 deps/v8/src/arm64/deoptimizer-arm64.cc vendored
@ -108,11 +108,9 @@ void Deoptimizer::TableEntryGenerator::Generate() {
   __ PushCPURegList(saved_float_registers);
 
   // We save all the registers except sp, lr and the masm scratches.
-  CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
+  CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 28);
   saved_registers.Remove(ip0);
   saved_registers.Remove(ip1);
-  // TODO(arm): padding here can be replaced with jssp/x28 when allocatable.
-  saved_registers.Combine(padreg);
   saved_registers.Combine(fp);
   DCHECK_EQ(saved_registers.Count() % 2, 0);
   __ PushCPURegList(saved_registers);
@ -220,8 +218,12 @@ void Deoptimizer::TableEntryGenerator::Generate() {
   }
   __ Pop(x4, padreg);  // Restore deoptimizer object (class Deoptimizer).
 
-  __ Ldr(__ StackPointer(),
-         MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+  {
+    UseScratchRegisterScope temps(masm());
+    Register scratch = temps.AcquireX();
+    __ Ldr(scratch, MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
+    __ Mov(__ StackPointer(), scratch);
+  }
 
   // Replace the current (input) frame with the output frames.
   Label outer_push_loop, inner_push_loop,
@ -324,7 +326,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
   if (__ emit_debug_code()) {
     // Ensure the entry_id looks sensible, ie. 0 <= entry_id < count().
     __ Cmp(entry_id, count());
-    __ Check(lo, kOffsetOutOfRange);
+    __ Check(lo, AbortReason::kOffsetOutOfRange);
  }
 }
51 deps/v8/src/arm64/disasm-arm64.cc vendored
@ -256,27 +256,26 @@ void DisassemblingDecoder::VisitLogicalImmediate(Instruction* instr) {
 
 bool DisassemblingDecoder::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
   DCHECK((reg_size == kXRegSizeInBits) ||
-         ((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
+         ((reg_size == kWRegSizeInBits) && (value <= 0xFFFFFFFF)));
 
   // Test for movz: 16-bits set at positions 0, 16, 32 or 48.
-  if (((value & 0xffffffffffff0000UL) == 0UL) ||
-      ((value & 0xffffffff0000ffffUL) == 0UL) ||
-      ((value & 0xffff0000ffffffffUL) == 0UL) ||
-      ((value & 0x0000ffffffffffffUL) == 0UL)) {
+  if (((value & 0xFFFFFFFFFFFF0000UL) == 0UL) ||
+      ((value & 0xFFFFFFFF0000FFFFUL) == 0UL) ||
+      ((value & 0xFFFF0000FFFFFFFFUL) == 0UL) ||
+      ((value & 0x0000FFFFFFFFFFFFUL) == 0UL)) {
     return true;
   }
 
   // Test for movn: NOT(16-bits set at positions 0, 16, 32 or 48).
   if ((reg_size == kXRegSizeInBits) &&
-      (((value & 0xffffffffffff0000UL) == 0xffffffffffff0000UL) ||
-       ((value & 0xffffffff0000ffffUL) == 0xffffffff0000ffffUL) ||
-       ((value & 0xffff0000ffffffffUL) == 0xffff0000ffffffffUL) ||
-       ((value & 0x0000ffffffffffffUL) == 0x0000ffffffffffffUL))) {
+      (((value & 0xFFFFFFFFFFFF0000UL) == 0xFFFFFFFFFFFF0000UL) ||
+       ((value & 0xFFFFFFFF0000FFFFUL) == 0xFFFFFFFF0000FFFFUL) ||
+       ((value & 0xFFFF0000FFFFFFFFUL) == 0xFFFF0000FFFFFFFFUL) ||
+       ((value & 0x0000FFFFFFFFFFFFUL) == 0x0000FFFFFFFFFFFFUL))) {
     return true;
   }
-  if ((reg_size == kWRegSizeInBits) &&
-      (((value & 0xffff0000) == 0xffff0000) ||
-       ((value & 0x0000ffff) == 0x0000ffff))) {
+  if ((reg_size == kWRegSizeInBits) && (((value & 0xFFFF0000) == 0xFFFF0000) ||
+                                        ((value & 0x0000FFFF) == 0x0000FFFF))) {
    return true;
  }
  return false;
@ -3332,8 +3331,6 @@ void DisassemblingDecoder::AppendRegisterNameToOutput(const CPURegister& reg) {
     // Filter special registers
     if (reg.IsX() && (reg.code() == 27)) {
       AppendToOutput("cp");
-    } else if (reg.IsX() && (reg.code() == 28)) {
-      AppendToOutput("jssp");
     } else if (reg.IsX() && (reg.code() == 29)) {
       AppendToOutput("fp");
     } else if (reg.IsX() && (reg.code() == 30)) {
@ -3469,7 +3466,7 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
       case 'e':
        // This is register Rm, but using a 4-bit specifier. Used in NEON
        // by-element instructions.
-        reg_num = (instr->Rm() & 0xf);
+        reg_num = (instr->Rm() & 0xF);
        break;
      case 'a':
        reg_num = instr->Ra();
@ -3545,8 +3542,6 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
       return field_len;
     default:
       UNREACHABLE();
-      reg_type = CPURegister::kRegister;
-      reg_size = kXRegSizeInBits;
   }
 
   if ((reg_type == CPURegister::kRegister) && (reg_num == kZeroRegCode) &&
@ -3569,7 +3564,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
       uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide())
                      << (16 * instr->ShiftMoveWide());
       if (format[5] == 'N') imm = ~imm;
-      if (!instr->SixtyFourBits()) imm &= UINT64_C(0xffffffff);
+      if (!instr->SixtyFourBits()) imm &= UINT64_C(0xFFFFFFFF);
       AppendToOutput("#0x%" PRIx64, imm);
     } else {
       DCHECK_EQ(format[5], 'L');
@ -3696,7 +3691,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
         vm_index = (vm_index << 1) | instr->NEONM();
       }
       AppendToOutput("%d", vm_index);
-      return strlen("IVByElemIndex");
+      return static_cast<int>(strlen("IVByElemIndex"));
     }
     case 'I': {  // INS element.
       if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) {
@ -3709,11 +3704,11 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
         rn_index = imm4 >> tz;
         if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) {
           AppendToOutput("%d", rd_index);
-          return strlen("IVInsIndex1");
+          return static_cast<int>(strlen("IVInsIndex1"));
         } else if (strncmp(format, "IVInsIndex2",
                            strlen("IVInsIndex2")) == 0) {
           AppendToOutput("%d", rn_index);
-          return strlen("IVInsIndex2");
+          return static_cast<int>(strlen("IVInsIndex2"));
         }
       }
       return 0;
@ -3728,38 +3723,38 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
                    0) {
         AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
                        instr->ImmNEONFP32());
-        return strlen("IVMIImmFPSingle");
+        return static_cast<int>(strlen("IVMIImmFPSingle"));
       } else if (strncmp(format, "IVMIImmFPDouble",
                          strlen("IVMIImmFPDouble")) == 0) {
         AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
                        instr->ImmNEONFP64());
-        return strlen("IVMIImmFPDouble");
+        return static_cast<int>(strlen("IVMIImmFPDouble"));
       } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) {
         uint64_t imm8 = instr->ImmNEONabcdefgh();
         AppendToOutput("#0x%" PRIx64, imm8);
-        return strlen("IVMIImm8");
+        return static_cast<int>(strlen("IVMIImm8"));
       } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) {
         uint64_t imm8 = instr->ImmNEONabcdefgh();
         uint64_t imm = 0;
         for (int i = 0; i < 8; ++i) {
           if (imm8 & (1 << i)) {
-            imm |= (UINT64_C(0xff) << (8 * i));
+            imm |= (UINT64_C(0xFF) << (8 * i));
          }
        }
        AppendToOutput("#0x%" PRIx64, imm);
-        return strlen("IVMIImm");
+        return static_cast<int>(strlen("IVMIImm"));
      } else if (strncmp(format, "IVMIShiftAmt1",
                         strlen("IVMIShiftAmt1")) == 0) {
        int cmode = instr->NEONCmode();
        int shift_amount = 8 * ((cmode >> 1) & 3);
        AppendToOutput("#%d", shift_amount);
-        return strlen("IVMIShiftAmt1");
+        return static_cast<int>(strlen("IVMIShiftAmt1"));
      } else if (strncmp(format, "IVMIShiftAmt2",
                         strlen("IVMIShiftAmt2")) == 0) {
        int cmode = instr->NEONCmode();
        int shift_amount = 8 << (cmode & 1);
        AppendToOutput("#%d", shift_amount);
-        return strlen("IVMIShiftAmt2");
+        return static_cast<int>(strlen("IVMIShiftAmt2"));
      } else {
        UNIMPLEMENTED();
        return 0;
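The static_cast<int>(strlen(...)) changes are narrowing fixes: the substitution helpers report how many format characters they consumed as an int, while strlen returns size_t. A minimal illustration (the warning motivation is an assumption; builds with -Wconversion or MSVC /W4 flag the implicit case):

#include <cstring>

// Explicit cast documents the size_t -> int narrowing and silences the
// conversion warning the implicit return would trigger.
int ConsumedLength(const char* token) {
  return static_cast<int>(std::strlen(token));
}

int main() { return ConsumedLength("IVByElemIndex") == 13 ? 0 : 1; }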
7 deps/v8/src/arm64/eh-frame-arm64.cc vendored
@ -9,7 +9,6 @@ namespace v8 {
 namespace internal {
 
 static const int kX0DwarfCode = 0;
-static const int kJsSpDwarfCode = 28;
 static const int kFpDwarfCode = 29;
 static const int kLrDwarfCode = 30;
 static const int kCSpDwarfCode = 31;
@ -29,13 +28,11 @@ void EhFrameWriter::WriteInitialStateInCie() {
 // static
 int EhFrameWriter::RegisterToDwarfCode(Register name) {
   switch (name.code()) {
-    case kRegCode_x28:
-      return kJsSpDwarfCode;
     case kRegCode_x29:
       return kFpDwarfCode;
     case kRegCode_x30:
       return kLrDwarfCode;
-    case kRegCode_x31:
+    case kSPRegInternalCode:
       return kCSpDwarfCode;
     case kRegCode_x0:
       return kX0DwarfCode;
@ -54,8 +51,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
       return "fp";
     case kLrDwarfCode:
       return "lr";
-    case kJsSpDwarfCode:
-      return "jssp";
     case kCSpDwarfCode:
       return "csp";  // This could be zr as well
     default:
25 deps/v8/src/arm64/frame-constants-arm64.h vendored
@ -8,10 +8,31 @@
 namespace v8 {
 namespace internal {
 
+// The layout of an EntryFrame is as follows:
+//
+//  slot      Entry frame
+//       +---------------------+-----------------------
+//   0   |  bad frame pointer  |  <-- frame ptr
+//       |    (0xFFF.. FF)     |
+//       |- - - - - - - - - - -|
+//   1   | stack frame marker  |
+//       |       (ENTRY)       |
+//       |- - - - - - - - - - -|
+//   2   | stack frame marker  |
+//       |         (0)         |
+//       |- - - - - - - - - - -|
+//   3   |     C entry FP      |
+//       |- - - - - - - - - - -|
+//   4   |   JS entry frame    |
+//       |       marker        |
+//       |- - - - - - - - - - -|
+//   5   |       padding       |  <-- stack ptr
+//  -----+---------------------+-----------------------
+//
 class EntryFrameConstants : public AllStatic {
  public:
-  static const int kCallerFPOffset =
-      -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+  static const int kCallerFPOffset = -3 * kPointerSize;
+  static const int kFixedFrameSize = 6 * kPointerSize;
 };
 
 class ExitFrameConstants : public TypedFrameConstants {
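The new constants follow directly from the diagram: fp points at slot 0, the C entry FP sits three slots below it, and the six-slot frame is a multiple of 16 bytes. A compile-time restatement (8-byte pointers assumed):

#include <cstdint>

constexpr int kPointerSize = 8;
constexpr int kCallerFPOffset = -3 * kPointerSize;  // slot 3 from fp
constexpr int kFixedFrameSize = 6 * kPointerSize;   // six slots total

static_assert(kCallerFPOffset == -24, "C entry FP is three slots from fp");
static_assert(kFixedFrameSize % 16 == 0, "entry frame keeps csp 16-aligned");

int main() { return 0; }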
@ -21,26 +21,26 @@ namespace internal {
 // then move this code back into instructions-arm64.cc with the same types
 // that client code uses.
 
-extern const uint16_t kFP16PositiveInfinity = 0x7c00;
-extern const uint16_t kFP16NegativeInfinity = 0xfc00;
-extern const uint32_t kFP32PositiveInfinity = 0x7f800000;
-extern const uint32_t kFP32NegativeInfinity = 0xff800000;
-extern const uint64_t kFP64PositiveInfinity = 0x7ff0000000000000UL;
-extern const uint64_t kFP64NegativeInfinity = 0xfff0000000000000UL;
+extern const uint16_t kFP16PositiveInfinity = 0x7C00;
+extern const uint16_t kFP16NegativeInfinity = 0xFC00;
+extern const uint32_t kFP32PositiveInfinity = 0x7F800000;
+extern const uint32_t kFP32NegativeInfinity = 0xFF800000;
+extern const uint64_t kFP64PositiveInfinity = 0x7FF0000000000000UL;
+extern const uint64_t kFP64NegativeInfinity = 0xFFF0000000000000UL;
 
 // This value is a signalling NaN as both a double and as a float (taking the
 // least-significant word).
-extern const uint64_t kFP64SignallingNaN = 0x7ff000007f800001;
-extern const uint32_t kFP32SignallingNaN = 0x7f800001;
+extern const uint64_t kFP64SignallingNaN = 0x7FF000007F800001;
+extern const uint32_t kFP32SignallingNaN = 0x7F800001;
 
 // A similar value, but as a quiet NaN.
-extern const uint64_t kFP64QuietNaN = 0x7ff800007fc00001;
-extern const uint32_t kFP32QuietNaN = 0x7fc00001;
+extern const uint64_t kFP64QuietNaN = 0x7FF800007FC00001;
+extern const uint32_t kFP32QuietNaN = 0x7FC00001;
 
 // The default NaN values (for FPCR.DN=1).
-extern const uint64_t kFP64DefaultNaN = 0x7ff8000000000000UL;
-extern const uint32_t kFP32DefaultNaN = 0x7fc00000;
-extern const uint16_t kFP16DefaultNaN = 0x7e00;
+extern const uint64_t kFP64DefaultNaN = 0x7FF8000000000000UL;
+extern const uint32_t kFP32DefaultNaN = 0x7FC00000;
+extern const uint16_t kFP16DefaultNaN = 0x7E00;
 
 } // namespace internal
 } // namespace v8
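These constants are plain IEEE-754 bit patterns; only the hex casing changed. A quick standalone check that the default-NaN and infinity patterns behave as claimed (memcpy as a portable bit_cast):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  const std::uint64_t kFP64DefaultNaN = 0x7FF8000000000000ULL;
  const std::uint64_t kFP64PositiveInfinity = 0x7FF0000000000000ULL;
  double d;
  std::memcpy(&d, &kFP64DefaultNaN, sizeof(d));
  assert(std::isnan(d));  // exponent all ones + top mantissa bit = quiet NaN
  std::memcpy(&d, &kFP64PositiveInfinity, sizeof(d));
  assert(std::isinf(d) && d > 0);
  return 0;
}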
4 deps/v8/src/arm64/instrument-arm64.cc vendored
@ -189,8 +189,8 @@ void Instrument::DumpEventMarker(unsigned marker) {
   // line.
   static Counter* counter = GetCounter("Instruction");
 
-  fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
-          (marker >> 8) & 0xff, counter->count());
+  fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xFF,
+          (marker >> 8) & 0xFF, counter->count());
 }
 
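The marker here packs two ASCII characters into the low 16 bits, printed low byte first; the 0xff/0xFF change is again only casing. A tiny standalone demonstration of the unpacking:

#include <cstdio>

int main() {
  // Two packed ASCII bytes, peeled off low byte first as in DumpEventMarker.
  unsigned marker = ('R' << 8) | 'B';
  std::printf("# %c%c\n", static_cast<char>(marker & 0xFF),
              static_cast<char>((marker >> 8) & 0xFF));  // prints "# BR"
  return 0;
}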
@ -45,8 +45,6 @@ const Register LoadDescriptor::SlotRegister() { return x0; }
 
 const Register LoadWithVectorDescriptor::VectorRegister() { return x3; }
 
-const Register LoadICProtoArrayDescriptor::HandlerRegister() { return x4; }
-
 const Register StoreDescriptor::ReceiverRegister() { return x1; }
 const Register StoreDescriptor::NameRegister() { return x2; }
 const Register StoreDescriptor::ValueRegister() { return x0; }
@ -209,6 +207,11 @@ void TransitionElementsKindDescriptor::InitializePlatformSpecific(
   data->InitializePlatformSpecific(arraysize(registers), registers);
 }
 
+void AbortJSDescriptor::InitializePlatformSpecific(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {x1};
+  data->InitializePlatformSpecific(arraysize(registers), registers);
+}
+
 void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@ -1048,7 +1048,6 @@ void MacroAssembler::AlignAndSetCSPForFrame() {
DCHECK_GE(sp_alignment, 16);
DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
}

void TurboAssembler::BumpSystemStackPointer(const Operand& space) {
@ -1140,22 +1139,6 @@ void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src) {
Scvtf(dst, src, kSmiShift);
}

void MacroAssembler::SmiTagAndPush(Register src) {
STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
(kSmiTag == 0));
Push(src.W(), wzr);
}

void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
(kSmiTag == 0));
Push(src1.W(), wzr, src2.W(), wzr);
}

void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label) {
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
@ -1222,7 +1205,7 @@ void MacroAssembler::ObjectTag(Register tagged_obj, Register obj) {
if (emit_debug_code()) {
Label ok;
Tbz(obj, 0, &ok);
Abort(kObjectTagged);
Abort(AbortReason::kObjectTagged);
Bind(&ok);
}
Orr(tagged_obj, obj, kHeapObjectTag);
@ -1234,7 +1217,7 @@ void MacroAssembler::ObjectUntag(Register untagged_obj, Register obj) {
if (emit_debug_code()) {
Label ok;
Tbnz(obj, 0, &ok);
Abort(kObjectNotTagged);
Abort(AbortReason::kObjectNotTagged);
Bind(&ok);
}
Bic(untagged_obj, obj, kHeapObjectTag);
@ -1246,7 +1229,10 @@ void TurboAssembler::Push(Handle<HeapObject> handle) {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Mov(tmp, Operand(handle));
Push(tmp);
// This is only used in test-heap.cc, for generating code that is not
// executed. Push a padding slot together with the handle here, to
// satisfy the alignment requirement.
Push(padreg, tmp);
}

void TurboAssembler::Push(Smi* smi) {
@ -1355,21 +1341,31 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {

void TurboAssembler::DropArguments(const Register& count,
ArgumentsCountMode mode) {
int extra_slots = 1; // Padding slot.
if (mode == kCountExcludesReceiver) {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Add(tmp, count, 1);
Drop(tmp);
} else {
Drop(count);
// Add a slot for the receiver.
++extra_slots;
}
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Add(tmp, count, extra_slots);
Bic(tmp, tmp, 1);
Drop(tmp, kXRegSize);
}

void TurboAssembler::DropSlots(int64_t count, uint64_t unit_size) {
Drop(count, unit_size);
void TurboAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) {
if (mode == kCountExcludesReceiver) {
// Add a slot for the receiver.
++count;
}
Drop(RoundUp(count, 2), kXRegSize);
}

void TurboAssembler::PushArgument(const Register& arg) { Push(arg); }
void TurboAssembler::DropSlots(int64_t count) {
Drop(RoundUp(count, 2), kXRegSize);
}

void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); }

void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));
329
deps/v8/src/arm64/macro-assembler-arm64.cc
vendored
@ -44,7 +44,6 @@ TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
#endif
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()),
sp_(jssp),
use_real_aborts_(true) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ =
@ -160,7 +159,7 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
UNREACHABLE();
}
} else if ((rd.Is64Bits() && (immediate == -1L)) ||
(rd.Is32Bits() && (immediate == 0xffffffffL))) {
(rd.Is32Bits() && (immediate == 0xFFFFFFFFL))) {
switch (op) {
case AND:
Mov(rd, rn);
@ -252,15 +251,15 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// Generic immediate case. Imm will be represented by
// [imm3, imm2, imm1, imm0], where each imm is 16 bits.
// A move-zero or move-inverted is generated for the first non-zero or
// non-0xffff immX, and a move-keep for subsequent non-zero immX.
// non-0xFFFF immX, and a move-keep for subsequent non-zero immX.

uint64_t ignored_halfword = 0;
bool invert_move = false;
// If the number of 0xffff halfwords is greater than the number of 0x0000
// If the number of 0xFFFF halfwords is greater than the number of 0x0000
// halfwords, it's more efficient to use move-inverted.
if (CountClearHalfWords(~imm, reg_size) >
CountClearHalfWords(imm, reg_size)) {
ignored_halfword = 0xffffL;
ignored_halfword = 0xFFFFL;
invert_move = true;
}

@ -274,11 +273,11 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
DCHECK_EQ(reg_size % 16, 0);
bool first_mov_done = false;
for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
uint64_t imm16 = (imm >> (16 * i)) & 0xFFFFL;
if (imm16 != ignored_halfword) {
if (!first_mov_done) {
if (invert_move) {
movn(temp, (~imm16) & 0xffffL, 16 * i);
movn(temp, (~imm16) & 0xFFFFL, 16 * i);
} else {
movz(temp, imm16, 16 * i);
}
@ -356,18 +355,18 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,

void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
DCHECK(is_uint16(imm));
int byte1 = (imm & 0xff);
int byte2 = ((imm >> 8) & 0xff);
int byte1 = (imm & 0xFF);
int byte2 = ((imm >> 8) & 0xFF);
if (byte1 == byte2) {
movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
} else if (byte1 == 0) {
movi(vd, byte2, LSL, 8);
} else if (byte2 == 0) {
movi(vd, byte1);
} else if (byte1 == 0xff) {
mvni(vd, ~byte2 & 0xff, LSL, 8);
} else if (byte2 == 0xff) {
mvni(vd, ~byte1 & 0xff);
} else if (byte1 == 0xFF) {
mvni(vd, ~byte2 & 0xFF, LSL, 8);
} else if (byte2 == 0xFF) {
mvni(vd, ~byte1 & 0xFF);
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireW();
@ -382,11 +381,11 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
uint8_t bytes[sizeof(imm)];
memcpy(bytes, &imm, sizeof(imm));

// All bytes are either 0x00 or 0xff.
// All bytes are either 0x00 or 0xFF.
{
bool all0orff = true;
for (int i = 0; i < 4; ++i) {
if ((bytes[i] != 0) && (bytes[i] != 0xff)) {
if ((bytes[i] != 0) && (bytes[i] != 0xFF)) {
all0orff = false;
break;
}
@ -400,47 +399,47 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {

// Of the 4 bytes, only one byte is non-zero.
for (int i = 0; i < 4; i++) {
if ((imm & (0xff << (i * 8))) == imm) {
if ((imm & (0xFF << (i * 8))) == imm) {
movi(vd, bytes[i], LSL, i * 8);
return;
}
}

// Of the 4 bytes, only one byte is not 0xff.
// Of the 4 bytes, only one byte is not 0xFF.
for (int i = 0; i < 4; i++) {
uint32_t mask = ~(0xff << (i * 8));
uint32_t mask = ~(0xFF << (i * 8));
if ((imm & mask) == mask) {
mvni(vd, ~bytes[i] & 0xff, LSL, i * 8);
mvni(vd, ~bytes[i] & 0xFF, LSL, i * 8);
return;
}
}

// Immediate is of the form 0x00MMFFFF.
if ((imm & 0xff00ffff) == 0x0000ffff) {
if ((imm & 0xFF00FFFF) == 0x0000FFFF) {
movi(vd, bytes[2], MSL, 16);
return;
}

// Immediate is of the form 0x0000MMFF.
if ((imm & 0xffff00ff) == 0x000000ff) {
if ((imm & 0xFFFF00FF) == 0x000000FF) {
movi(vd, bytes[1], MSL, 8);
return;
}

// Immediate is of the form 0xFFMM0000.
if ((imm & 0xff00ffff) == 0xff000000) {
mvni(vd, ~bytes[2] & 0xff, MSL, 16);
if ((imm & 0xFF00FFFF) == 0xFF000000) {
mvni(vd, ~bytes[2] & 0xFF, MSL, 16);
return;
}
// Immediate is of the form 0xFFFFMM00.
if ((imm & 0xffff00ff) == 0xffff0000) {
mvni(vd, ~bytes[1] & 0xff, MSL, 8);
if ((imm & 0xFFFF00FF) == 0xFFFF0000) {
mvni(vd, ~bytes[1] & 0xFF, MSL, 8);
return;
}

// Top and bottom 16-bits are equal.
if (((imm >> 16) & 0xffff) == (imm & 0xffff)) {
Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);
if (((imm >> 16) & 0xFFFF) == (imm & 0xFFFF)) {
Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xFFFF);
return;
}

@ -454,12 +453,12 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
}

void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
// All bytes are either 0x00 or 0xff.
// All bytes are either 0x00 or 0xFF.
{
bool all0orff = true;
for (int i = 0; i < 8; ++i) {
int byteval = (imm >> (i * 8)) & 0xff;
if (byteval != 0 && byteval != 0xff) {
int byteval = (imm >> (i * 8)) & 0xFF;
if (byteval != 0 && byteval != 0xFF) {
all0orff = false;
break;
}
@ -471,8 +470,8 @@ void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
}

// Top and bottom 32-bits are equal.
if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) {
Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);
if (((imm >> 32) & 0xFFFFFFFF) == (imm & 0xFFFFFFFF)) {
Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xFFFFFFFF);
return;
}

@ -547,7 +546,7 @@ unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
DCHECK_EQ(reg_size % 8, 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
if ((imm & 0xFFFF) == 0) {
count++;
}
imm >>= 16;
@ -563,9 +562,8 @@ bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}

// The movn instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits set, eg. 0xffff1234, 0xffff1234ffffffff.
// half-word, with remaining bits set, eg. 0xFFFF1234, 0xFFFF1234FFFFFFFF.
bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
return IsImmMovz(~imm, reg_size);
}
@ -1375,7 +1373,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
DCHECK_GE(offset.ImmediateValue(), 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
Check(le, AbortReason::kStackAccessBelowStackPointer);
}

Str(src, MemOperand(StackPointer(), offset));
@ -1387,7 +1385,7 @@ void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
DCHECK_GE(offset.ImmediateValue(), 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
Check(le, AbortReason::kStackAccessBelowStackPointer);
}

Ldr(dst, MemOperand(StackPointer(), offset));
@ -1426,7 +1424,7 @@ void MacroAssembler::PushCalleeSavedRegisters() {
stp(d8, d9, tos);

stp(x29, x30, tos);
stp(x27, x28, tos); // x28 = jssp
stp(x27, x28, tos);
stp(x25, x26, tos);
stp(x23, x24, tos);
stp(x21, x22, tos);
@ -1448,7 +1446,7 @@ void MacroAssembler::PopCalleeSavedRegisters() {
ldp(x21, x22, tos);
ldp(x23, x24, tos);
ldp(x25, x26, tos);
ldp(x27, x28, tos); // x28 = jssp
ldp(x27, x28, tos);
ldp(x29, x30, tos);

ldp(d8, d9, tos);
@ -1479,7 +1477,7 @@ void TurboAssembler::AssertStackConsistency() {
{ DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
// Restore StackPointer().
sub(StackPointer(), csp, StackPointer());
Abort(kTheCurrentStackPointerIsBelowCsp);
Abort(AbortReason::kTheCurrentStackPointerIsBelowCsp);
}

bind(&ok);
@ -1531,7 +1529,7 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
Subs(pointer1, pointer1, pointer2);
B(lt, &pointer1_below_pointer2);
Cmp(pointer1, count);
Check(ge, kOffsetOutOfRange);
Check(ge, AbortReason::kOffsetOutOfRange);
Bind(&pointer1_below_pointer2);
Add(pointer1, pointer1, pointer2);
}
@ -1595,7 +1593,7 @@ void TurboAssembler::AssertFPCRState(Register fpcr) {
B(eq, &done);

Bind(&unexpected_mode);
Abort(kUnexpectedFPCRMode);
Abort(AbortReason::kUnexpectedFPCRMode);

Bind(&done);
}
@ -1632,7 +1630,7 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }

void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
void TurboAssembler::AssertSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
@ -1640,7 +1638,7 @@ void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
}
}

void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
Tst(object, kSmiTagMask);
@ -1650,44 +1648,44 @@ void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {

void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, kOperandIsASmiAndNotAFixedArray);
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFixedArray);

UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();

CompareObjectType(object, temp, temp, FIXED_ARRAY_TYPE);
Check(eq, kOperandIsNotAFixedArray);
Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}

void MacroAssembler::AssertFunction(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction);

UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();

CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
Check(eq, kOperandIsNotAFunction);
Check(eq, AbortReason::kOperandIsNotAFunction);
}
}

void MacroAssembler::AssertBoundFunction(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction);

UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();

CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
Check(eq, kOperandIsNotABoundFunction);
Check(eq, AbortReason::kOperandIsNotABoundFunction);
}
}

void MacroAssembler::AssertGeneratorObject(Register object) {
if (!emit_debug_code()) return;
AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAGeneratorObject);

// Load map
UseScratchRegisterScope temps(this);
@ -1704,7 +1702,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) {

bind(&do_check);
// Restore generator object to register and perform assertion
Check(eq, kOperandIsNotAGeneratorObject);
Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
}

void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
@ -1716,7 +1714,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
Assert(eq, kExpectedUndefinedOrCell);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
Bind(&done_checking);
}
}
@ -1726,7 +1724,7 @@ void TurboAssembler::AssertPositiveOrZero(Register value) {
Label done;
int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
Tbz(value, sign_bit, &done);
Abort(kUnexpectedNegativeValue);
Abort(AbortReason::kUnexpectedNegativeValue);
Bind(&done);
}
}
@ -1855,72 +1853,14 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
}

// We rely on the frame alignment being 16 bytes, which means we never need
// to align the CSP by an unknown number of bytes and we always know the delta
// between the stack pointer and the frame pointer.
DCHECK_EQ(ActivationFrameAlignment(), 16);

// If the stack pointer is not csp, we need to derive an aligned csp from the
// current stack pointer.
const Register old_stack_pointer = StackPointer();
if (!csp.Is(old_stack_pointer)) {
AssertStackConsistency();

int sp_alignment = ActivationFrameAlignment();
// The current stack pointer is a callee saved register, and is preserved
// across the call.
DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));

// If more than eight arguments are passed to the function, we expect the
// ninth argument onwards to have been placed on the csp-based stack
// already. We assume csp already points to the last stack-passed argument
// in that case.
// Otherwise, align and synchronize the system stack pointer with jssp.
if (num_of_reg_args <= kRegisterPassedArguments) {
Bic(csp, old_stack_pointer, sp_alignment - 1);
}
SetStackPointer(csp);
}

// Call directly. The function called cannot cause a GC, or allow preemption,
// so the return address in the link register stays correct.
Call(function);

if (csp.Is(old_stack_pointer)) {
if (num_of_reg_args > kRegisterPassedArguments) {
// Drop the register passed arguments.
int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
Drop(claim_slots);
}
} else {
DCHECK(jssp.Is(old_stack_pointer));
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();

if (num_of_reg_args > kRegisterPassedArguments) {
// We don't need to drop stack arguments, as the stack pointer will be
// jssp when returning from this function. However, in debug builds, we
// can check that jssp is as expected.
int claim_slots =
RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);

// Check jssp matches the previous value on the stack.
Ldr(temp, MemOperand(csp, claim_slots * kPointerSize));
Cmp(jssp, temp);
Check(eq, kTheStackWasCorruptedByMacroAssemblerCall);
} else {
// Because the stack pointer must be aligned on a 16-byte boundary, the
// aligned csp can be up to 12 bytes below the jssp. This is the case
// where we only pushed one W register on top of an aligned jssp.
Sub(temp, csp, old_stack_pointer);
// We want temp <= 0 && temp >= -12.
Cmp(temp, 0);
Ccmp(temp, -12, NFlag, le);
Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
}
}
SetStackPointer(old_stack_pointer);
if (num_of_reg_args > kRegisterPassedArguments) {
// Drop the register passed arguments.
int claim_slots = RoundUp(num_of_reg_args - kRegisterPassedArguments, 2);
Drop(claim_slots);
}
}

@ -1997,10 +1937,10 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
DCHECK_EQ((imm >> 48) & 0xffff, 0);
movz(temp, (imm >> 0) & 0xffff, 0);
movk(temp, (imm >> 16) & 0xffff, 16);
movk(temp, (imm >> 32) & 0xffff, 32);
DCHECK_EQ((imm >> 48) & 0xFFFF, 0);
movz(temp, (imm >> 0) & 0xFFFF, 0);
movk(temp, (imm >> 16) & 0xFFFF, 16);
movk(temp, (imm >> 32) & 0xFFFF, 32);
} else {
Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
}
@ -2160,23 +2100,32 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
// after we drop current frame. We add kPointerSize to count the receiver
// argument which is not included into formal parameters count.
Register dst_reg = scratch0;
add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
add(dst_reg, dst_reg,
Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
Add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
Add(dst_reg, dst_reg, StandardFrameConstants::kCallerSPOffset + kPointerSize);
// Round dst_reg up to a multiple of 16 bytes, so that we overwrite any
// potential padding.
Add(dst_reg, dst_reg, 15);
Bic(dst_reg, dst_reg, 15);

Register src_reg = caller_args_count_reg;
// Calculate the end of source area. +kPointerSize is for the receiver.
if (callee_args_count.is_reg()) {
add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
add(src_reg, src_reg, Operand(kPointerSize));
Add(src_reg, StackPointer(),
Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
Add(src_reg, src_reg, kPointerSize);
} else {
add(src_reg, jssp,
Operand((callee_args_count.immediate() + 1) * kPointerSize));
Add(src_reg, StackPointer(),
(callee_args_count.immediate() + 1) * kPointerSize);
}

// Round src_reg up to a multiple of 16 bytes, so we include any potential
// padding in the copy.
Add(src_reg, src_reg, 15);
Bic(src_reg, src_reg, 15);

if (FLAG_debug_code) {
Cmp(src_reg, dst_reg);
Check(lo, kStackAccessBelowStackPointer);
Check(lo, AbortReason::kStackAccessBelowStackPointer);
}

// Restore caller's frame pointer and return address now as they will be
@ -2196,12 +2145,11 @@ void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
bind(&entry);
Cmp(jssp, src_reg);
Cmp(StackPointer(), src_reg);
B(ne, &loop);

// Leave current frame.
Mov(jssp, dst_reg);
SetStackPointer(jssp);
Mov(StackPointer(), dst_reg);
AssertStackConsistency();
}

@ -2412,12 +2360,12 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result,
// the modulo operation on an integer register so we convert to a 64-bit
// integer.
//
// Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
// Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF)
// when the double is out of range. NaNs and infinities will be converted to 0
// (as ECMA-262 requires).
Fcvtzs(result.X(), double_input);

// The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
// The values INT64_MIN (0x800...00) or INT64_MAX (0x7FF...FF) are not
// representable using a double, so if the result is one of those then we know
// that saturation occurred, and we need to manually handle the conversion.
//
@ -2437,17 +2385,6 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
// contain our truncated int32 result.
TryConvertDoubleToInt64(result, double_input, &done);

const Register old_stack_pointer = StackPointer();
if (csp.Is(old_stack_pointer)) {
// This currently only happens during compiler-unittest. If it arises
// during regular code generation the DoubleToI stub should be updated to
// cope with csp and have an extra parameter indicating which stack pointer
// it should use.
Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
Mov(jssp, csp);
SetStackPointer(jssp);
}

// If we fell through then inline version didn't succeed - call stub instead.
Push(lr, double_input);

@ -2458,13 +2395,6 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
Pop(xzr, lr); // xzr to drop the double input on the stack.

if (csp.Is(old_stack_pointer)) {
Mov(csp, jssp);
SetStackPointer(csp);
AssertStackConsistency();
Pop(xzr, jssp);
}

Bind(&done);
// Keep our invariant that the upper 32 bits are zero.
Uxtw(result.W(), result.W());
@ -2472,7 +2402,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,

void TurboAssembler::Prologue() {
Push(lr, fp, cp, x1);
Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
Add(fp, StackPointer(), StandardFrameConstants::kFixedFrameSizeFromFp);
}

void TurboAssembler::EnterFrame(StackFrame::Type type) {
@ -2481,15 +2411,14 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
Register code_reg = temps.AcquireX();

if (type == StackFrame::INTERNAL) {
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Mov(code_reg, Operand(CodeObject()));
Push(lr, fp, type_reg, code_reg);
Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
// jssp[4] : lr
// jssp[3] : fp
// jssp[1] : type
// jssp[0] : [code object]
Add(fp, StackPointer(), InternalFrameConstants::kFixedFrameSizeFromFp);
// sp[4] : lr
// sp[3] : fp
// sp[1] : type
// sp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
@ -2502,7 +2431,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// csp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));

// Users of this frame type push a context pointer after the type field,
@ -2511,11 +2439,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {

// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
// jssp[3] : lr
// jssp[2] : fp
// jssp[1] : type
// jssp[0] : cp
Add(fp, StackPointer(),
TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
// sp[0] : cp
}
}

@ -2526,10 +2455,9 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
AssertStackConsistency();
Pop(fp, lr);
} else {
DCHECK(jssp.Is(StackPointer()));
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(jssp, fp);
Mov(StackPointer(), fp);
AssertStackConsistency();
Pop(fp, lr);
}
@ -2560,7 +2488,6 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
int extra_space,
StackFrame::Type frame_type) {
DCHECK(jssp.Is(StackPointer()));
DCHECK(frame_type == StackFrame::EXIT ||
frame_type == StackFrame::BUILTIN_EXIT);

@ -2576,7 +2503,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// fp[-8]: STUB marker
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
// jssp -> fp[-32]: padding
// sp -> fp[-32]: padding
STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
@ -2610,23 +2537,11 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
// fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
// jssp[8]: Extra space reserved for caller (if extra_space != 0).
// jssp -> jssp[0]: Space reserved for the return address.
// sp[8]: Extra space reserved for caller (if extra_space != 0).
// sp -> sp[0]: Space reserved for the return address.

// Align and synchronize the system stack pointer with jssp.
AlignAndSetCSPForFrame();
DCHECK(csp.Is(StackPointer()));

// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[-8]: STUB marker
// fp[-16]: Space reserved for SPOffset.
// fp[-24]: CodeObject()
// fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
// csp[8]: Memory reserved for the caller if extra_space != 0.
// Alignment padding, if necessary.
// csp -> csp[0]: Space reserved for the return address.

// ExitFrame::GetStateForFramePointer expects to find the return address at
// the memory address immediately below the pointer stored in SPOffset.
// It is not safe to derive much else from SPOffset, because the size of the
@ -2638,7 +2553,8 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,

// Leave the current exit frame.
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch) {
const Register& scratch,
const Register& scratch2) {
DCHECK(csp.Is(StackPointer()));

if (restore_doubles) {
@ -2652,9 +2568,10 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,

if (emit_debug_code()) {
// Also emit debug code to clear the cp in the top frame.
Mov(scratch2, Operand(Context::kInvalidContext));
Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
isolate())));
Str(xzr, MemOperand(scratch));
Str(scratch2, MemOperand(scratch));
}
// Clear the frame pointer from the top frame.
Mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
@ -2665,8 +2582,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
// fp[...]: The rest of the frame.
Mov(jssp, fp);
SetStackPointer(jssp);
Mov(csp, fp);
AssertStackConsistency();
Pop(fp, lr);
}
@ -2830,14 +2746,12 @@ void MacroAssembler::PushSafepointRegisters() {

int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// Make sure the safepoint registers list is what we expect.
DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6ffcffff);
DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6FFCFFFF);

// Safepoint registers are stored contiguously on the stack, but not all the
// registers are saved. The following registers are excluded:
// - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
// the macro assembler.
// - x28 (jssp) because JS stack pointer doesn't need to be included in
// safepoint registers.
// - x31 (csp) because the system stack pointer doesn't need to be included
// in safepoint registers.
//
@ -2845,12 +2759,9 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// safepoint register slots.
if ((reg_code >= 0) && (reg_code <= 15)) {
return reg_code;
} else if ((reg_code >= 18) && (reg_code <= 27)) {
} else if ((reg_code >= 18) && (reg_code <= 30)) {
// Skip ip0 and ip1.
return reg_code - 2;
} else if ((reg_code == 29) || (reg_code == 30)) {
// Also skip jssp.
return reg_code - 3;
} else {
// This register has no safepoint register slot.
UNREACHABLE();
@ -2909,7 +2820,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
Label ok;
Tst(scratch, kPointerSize - 1);
B(eq, &ok);
Abort(kUnalignedCellInWriteBarrier);
Abort(AbortReason::kUnalignedCellInWriteBarrier);
Bind(&ok);
}

@ -2975,11 +2886,9 @@ void TurboAssembler::CallRecordWriteStub(
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kFPMode));

Push(object);
Push(address);
Push(object, address);

Pop(slot_parameter);
Pop(object_parameter);
Pop(slot_parameter, object_parameter);

Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
@ -3008,7 +2917,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,

Ldr(temp, MemOperand(address));
Cmp(temp, value);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
}

// First, check if a write barrier is even needed. The tests below
@ -3052,7 +2961,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}

void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
void TurboAssembler::Assert(Condition cond, AbortReason reason) {
if (emit_debug_code()) {
Check(cond, reason);
}
@ -3060,14 +2969,14 @@ void TurboAssembler::Assert(Condition cond, BailoutReason reason) {

void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index,
BailoutReason reason) {
AbortReason reason) {
if (emit_debug_code()) {
CompareRoot(reg, index);
Check(eq, reason);
}
}

void TurboAssembler::Check(Condition cond, BailoutReason reason) {
void TurboAssembler::Check(Condition cond, AbortReason reason) {
Label ok;
B(cond, &ok);
Abort(reason);
@ -3075,10 +2984,10 @@ void TurboAssembler::Check(Condition cond, BailoutReason reason) {
Bind(&ok);
}

void TurboAssembler::Abort(BailoutReason reason) {
void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
RecordComment("Abort message: ");
RecordComment(GetBailoutReason(reason));
RecordComment(GetAbortReason(reason));

if (FLAG_trap_on_abort) {
Brk(0);
@ -3086,13 +2995,6 @@ void TurboAssembler::Abort(BailoutReason reason) {
}
#endif

// Abort is used in some contexts where csp is the stack pointer. In order to
// simplify the CallRuntime code, make sure that jssp is the stack pointer.
// There is no risk of register corruption here because Abort doesn't return.
Register old_stack_pointer = StackPointer();
SetStackPointer(jssp);
Mov(jssp, old_stack_pointer);

// We need some scratch registers for the MacroAssembler, so make sure we have
// some. This is safe here because Abort never returns.
RegList old_tmp_list = TmpList()->list();
@ -3128,11 +3030,10 @@ void TurboAssembler::Abort(BailoutReason reason) {
{
BlockPoolsScope scope(this);
Bind(&msg_address);
EmitStringData(GetBailoutReason(reason));
EmitStringData(GetAbortReason(reason));
}
}

SetStackPointer(old_stack_pointer);
TmpList()->set_list(old_tmp_list);
}

@ -3266,7 +3167,7 @@ void MacroAssembler::PrintfNoPreserve(const char * format,
// We don't pass any arguments on the stack, but we still need to align the C
// stack pointer to a 16-byte boundary for PCS compliance.
if (!csp.Is(StackPointer())) {
Bic(csp, StackPointer(), 0xf);
Bic(csp, StackPointer(), 0xF);
}

CallPrintf(arg_count, pcs);
82
deps/v8/src/arm64/macro-assembler-arm64.h
vendored
@ -216,12 +216,6 @@ class TurboAssembler : public Assembler {
bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif

// Set the current stack pointer, but don't generate any code.
inline void SetStackPointer(const Register& stack_pointer) {
DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
sp_ = stack_pointer;
}

// Activation support.
void EnterFrame(StackFrame::Type type);
void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
@ -574,17 +568,18 @@ class TurboAssembler : public Assembler {

// Calls Abort(msg) if the condition cond is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cond, BailoutReason reason);
void Assert(Condition cond, AbortReason reason);

void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
void AssertSmi(Register object,
AbortReason reason = AbortReason::kOperandIsNotASmi);

// Like Assert(), but always enabled.
void Check(Condition cond, BailoutReason reason);
void Check(Condition cond, AbortReason reason);

inline void Debug(const char* message, uint32_t code, Instr params = BREAK);

// Print a message to stderr and abort execution.
void Abort(BailoutReason reason);
void Abort(AbortReason reason);

// If emit_debug_code() is true, emit a run-time check to ensure that
// StackPointer() does not point below the system stack pointer.
@ -619,8 +614,8 @@ class TurboAssembler : public Assembler {
static CPURegList DefaultTmpList();
static CPURegList DefaultFPTmpList();

// Return the current stack pointer, as set by SetStackPointer.
inline const Register& StackPointer() const { return sp_; }
// Return the stack pointer.
inline const Register& StackPointer() const { return csp; }

// Move macros.
inline void Mvn(const Register& rd, uint64_t imm);
@ -711,25 +706,22 @@ class TurboAssembler : public Assembler {
inline void Drop(int64_t count, uint64_t unit_size = kXRegSize);
inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);

// Drop arguments from stack without actually accessing memory.
// This will currently drop 'count' arguments from the stack.
// Drop 'count' arguments from the stack, rounded up to a multiple of two,
// without actually accessing memory.
// We assume the size of the arguments is the pointer size.
// An optional mode argument is passed, which can indicate we need to
// explicitly add the receiver to the count.
// TODO(arm64): Update this to round up the number of bytes dropped to
// a multiple of 16, so that we can remove jssp.
enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
inline void DropArguments(const Register& count,
ArgumentsCountMode mode = kCountIncludesReceiver);
inline void DropArguments(int64_t count,
ArgumentsCountMode mode = kCountIncludesReceiver);

// Drop slots from stack without actually accessing memory.
// This will currently drop 'count' slots of the given size from the stack.
// TODO(arm64): Update this to round up the number of bytes dropped to
// a multiple of 16, so that we can remove jssp.
inline void DropSlots(int64_t count, uint64_t unit_size = kXRegSize);
// Drop 'count' slots from stack, rounded up to a multiple of two, without
// actually accessing memory.
inline void DropSlots(int64_t count);

// Push a single argument to the stack.
// TODO(arm64): Update this to push a padding slot above the argument.
// Push a single argument, with padding, to the stack.
inline void PushArgument(const Register& arg);

// Re-synchronizes the system stack pointer (csp) with the current stack
@ -769,8 +761,7 @@ class TurboAssembler : public Assembler {
LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

// Push or pop up to 4 registers of the same width to or from the stack,
// using the current stack pointer as set by SetStackPointer.
// Push or pop up to 4 registers of the same width to or from the stack.
//
// If an argument register is 'NoReg', all further arguments are also assumed
// to be 'NoReg', and are thus not pushed or popped.
@ -784,9 +775,8 @@ class TurboAssembler : public Assembler {
// It is not valid to pop into the same register more than once in one
// operation, not even into the zero register.
//
// If the current stack pointer (as set by SetStackPointer) is csp, then it
// must be aligned to 16 bytes on entry and the total size of the specified
// registers must also be a multiple of 16 bytes.
// The stack pointer must be aligned to 16 bytes on entry and the total size
// of the specified registers must also be a multiple of 16 bytes.
//
// Even if the current stack pointer is not the system stack pointer (csp),
// Push (and derived methods) will still modify the system stack pointer in
@ -1291,9 +1281,6 @@ class TurboAssembler : public Assembler {
CPURegList tmp_list_;
CPURegList fptmp_list_;

// The register to use as a stack pointer for stack operations.
Register sp_;

bool use_real_aborts_;

// Helps resolve branching to labels potentially out of range.
@ -1707,10 +1694,6 @@ class MacroAssembler : public TurboAssembler {
//
// Note that registers are not checked for invalid values. Use this method
// only if you know that the GC won't try to examine the values on the stack.
//
// This method must not be called unless the current stack pointer (as set by
// SetStackPointer) is the system stack pointer (csp), and is aligned to
// ActivationFrameAlignment().
void PushCalleeSavedRegisters();

// Restore the callee-saved registers (as defined by AAPCS64).
@ -1719,10 +1702,6 @@ class MacroAssembler : public TurboAssembler {
// thus come from higher addresses.
// Floating-point registers are popped after general-purpose registers, and
// thus come from higher addresses.
//
// This method must not be called unless the current stack pointer (as set by
// SetStackPointer) is the system stack pointer (csp), and is aligned to
// ActivationFrameAlignment().
void PopCalleeSavedRegisters();

// Align csp for a frame, as per ActivationFrameAlignment, and make it the
@ -1752,10 +1731,6 @@ class MacroAssembler : public TurboAssembler {
inline void SmiUntagToDouble(VRegister dst, Register src);
inline void SmiUntagToFloat(VRegister dst, Register src);

// Tag and push in one step.
inline void SmiTagAndPush(Register src);
inline void SmiTagAndPush(Register src1, Register src2);

inline void JumpIfNotSmi(Register value, Label* not_smi_label);
inline void JumpIfBothSmi(Register value1, Register value2,
Label* both_smi_label,
@ -1771,7 +1746,8 @@ class MacroAssembler : public TurboAssembler {
Label* not_smi_label);

// Abort execution if argument is a smi, enabled via --debug-code.
void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
void AssertNotSmi(Register object,
AbortReason reason = AbortReason::kOperandIsASmi);

inline void ObjectTag(Register tagged_obj, Register obj);
inline void ObjectUntag(Register untagged_obj, Register obj);
@ -1948,19 +1924,14 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Frames.

// The stack pointer has to switch between csp and jssp when setting up and
// destroying the exit frame. Hence preserving/restoring the registers is
// slightly more complicated than simple push/pop operations.
void ExitFramePreserveFPRegs();
void ExitFrameRestoreFPRegs();

// Enter exit frame. Exit frames are used when calling C code from generated
// (JavaScript) code.
//
// The stack pointer must be jssp on entry, and will be set to csp by this
// function. The frame pointer is also configured, but the only other
// registers modified by this function are the provided scratch register, and
// jssp.
// The only registers modified by this function are the provided scratch
// register, the frame pointer and the stack pointer.
//
// The 'extra_space' argument can be used to allocate some space in the exit
// frame that will be ignored by the GC. This space will be reserved in the
@ -1989,10 +1960,10 @@ class MacroAssembler : public TurboAssembler {
// * Preserved doubles are restored (if restore_doubles is true).
// * The frame information is removed from the top frame.
// * The exit frame is dropped.
// * The stack pointer is reset to jssp.
//
// The stack pointer must be csp on entry.
void LeaveExitFrame(bool save_doubles, const Register& scratch);
void LeaveExitFrame(bool save_doubles, const Register& scratch,
const Register& scratch2);

// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
@ -2042,9 +2013,8 @@ class MacroAssembler : public TurboAssembler {
// Debugging.

void AssertRegisterIsRoot(
Register reg,
Heap::RootListIndex index,
BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
Register reg, Heap::RootListIndex index,
AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot);

// Abort if the specified register contains the invalid color bit pattern.
// The pattern must be in bits [1:0] of 'reg' register.
227
deps/v8/src/arm64/simulator-arm64.cc
vendored
@ -98,13 +98,6 @@ SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
|
||||
}
|
||||
|
||||
|
||||
void Simulator::Initialize(Isolate* isolate) {
|
||||
if (isolate->simulator_initialized()) return;
|
||||
isolate->set_simulator_initialized(true);
|
||||
ExternalReference::set_redirector(isolate, &RedirectExternalReference);
|
||||
}
|
||||
|
||||
|
||||
// Get the active Simulator for the current thread.
|
||||
Simulator* Simulator::current(Isolate* isolate) {
|
||||
Isolate::PerIsolateThreadData* isolate_data =
|
||||
@ -124,8 +117,7 @@ Simulator* Simulator::current(Isolate* isolate) {
|
||||
return sim;
|
||||
}
|
||||
|
||||
|
||||
void Simulator::CallVoid(byte* entry, CallArgument* args) {
|
||||
void Simulator::CallImpl(byte* entry, CallArgument* args) {
|
||||
int index_x = 0;
|
||||
int index_d = 0;
|
||||
|
||||
@ -167,63 +159,6 @@ void Simulator::CallVoid(byte* entry, CallArgument* args) {
|
||||
set_sp(original_stack);
|
||||
}
|
||||
|
||||
|
||||
int64_t Simulator::CallInt64(byte* entry, CallArgument* args) {
|
||||
CallVoid(entry, args);
|
||||
return xreg(0);
|
||||
}
|
||||
|
||||
|
||||
double Simulator::CallDouble(byte* entry, CallArgument* args) {
|
||||
CallVoid(entry, args);
|
||||
return dreg(0);
|
||||
}
|
||||
|
||||
|
||||
int64_t Simulator::CallJS(byte* entry,
|
||||
Object* new_target,
|
||||
Object* target,
|
||||
Object* revc,
|
||||
int64_t argc,
|
||||
Object*** argv) {
|
||||
CallArgument args[] = {
|
||||
CallArgument(new_target),
|
||||
CallArgument(target),
|
||||
CallArgument(revc),
|
||||
CallArgument(argc),
|
||||
CallArgument(argv),
|
||||
CallArgument::End()
|
||||
};
|
||||
return CallInt64(entry, args);
|
||||
}
|
||||
|
||||
|
||||
int64_t Simulator::CallRegExp(byte* entry,
|
||||
String* input,
|
||||
int64_t start_offset,
|
||||
const byte* input_start,
|
||||
const byte* input_end,
|
||||
int* output,
|
||||
int64_t output_size,
|
||||
Address stack_base,
|
||||
int64_t direct_call,
|
||||
Isolate* isolate) {
|
||||
CallArgument args[] = {
|
||||
CallArgument(input),
|
||||
CallArgument(start_offset),
|
||||
CallArgument(input_start),
|
||||
CallArgument(input_end),
|
||||
CallArgument(output),
|
||||
CallArgument(output_size),
|
||||
CallArgument(stack_base),
|
||||
CallArgument(direct_call),
|
||||
CallArgument(isolate),
|
||||
CallArgument::End()
|
||||
};
|
||||
return CallInt64(entry, args);
|
||||
}
|
||||
|
||||
|
||||
void Simulator::CheckPCSComplianceAndRun() {
|
||||
// Adjust JS-based stack limit to C-based stack limit.
|
||||
isolate_->stack_guard()->AdjustStackLimitForSimulator();
|
||||
@ -350,6 +285,11 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
|
||||
return stack_limit_ + 1024;
|
||||
}
|
||||
|
||||
void Simulator::SetRedirectInstruction(Instruction* instruction) {
|
||||
instruction->SetInstructionBits(
|
||||
HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
|
||||
}
|
||||
|
||||
Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
|
||||
Isolate* isolate, FILE* stream)
|
||||
: decoder_(decoder),
|
||||
@ -392,7 +332,7 @@ void Simulator::Init(FILE* stream) {
|
||||
stack_limit_ = stack_ + stack_protection_size_;
|
||||
uintptr_t tos = stack_ + stack_size_ - stack_protection_size_;
|
||||
// The stack pointer must be 16-byte aligned.
|
||||
set_sp(tos & ~0xfUL);
|
||||
set_sp(tos & ~0xFUL);
|
||||
|
||||
stream_ = stream;
|
||||
print_disasm_ = new PrintDisassembler(stream_);
|
||||
@ -412,11 +352,11 @@ void Simulator::ResetState() {
|
||||
// Reset registers to 0.
|
||||
pc_ = nullptr;
|
||||
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
|
||||
set_xreg(i, 0xbadbeef);
|
||||
set_xreg(i, 0xBADBEEF);
|
||||
}
|
||||
for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
|
||||
// Set FP registers to a value that is NaN in both 32-bit and 64-bit FP.
|
||||
set_dreg_bits(i, 0x7ff000007f800001UL);
|
||||
set_dreg_bits(i, 0x7FF000007F800001UL);
|
||||
}
|
||||
// Returning to address 0 exits the Simulator.
|
||||
set_lr(kEndOfSimAddress);
|
||||
@ -458,82 +398,6 @@ void Simulator::RunFrom(Instruction* start) {
|
||||
}
|
||||
|
||||
|
||||
// When the generated code calls an external reference we need to catch that in
|
||||
// the simulator. The external reference will be a function compiled for the
|
||||
// host architecture. We need to call that function instead of trying to
|
||||
// execute it with the simulator. We do that by redirecting the external
|
||||
// reference to a svc (Supervisor Call) instruction that is handled by
|
||||
// the simulator. We write the original destination of the jump just at a known
|
||||
// offset from the svc instruction so the simulator knows what to call.
|
||||
class Redirection {
|
||||
public:
|
||||
Redirection(Isolate* isolate, void* external_function,
|
||||
ExternalReference::Type type)
|
||||
: external_function_(external_function), type_(type), next_(nullptr) {
|
||||
redirect_call_.SetInstructionBits(
|
||||
HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
|
||||
next_ = isolate->simulator_redirection();
|
||||
// TODO(all): Simulator flush I cache
|
||||
isolate->set_simulator_redirection(this);
|
||||
}
|
||||
|
||||
void* address_of_redirect_call() {
|
||||
return reinterpret_cast<void*>(&redirect_call_);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
T external_function() { return reinterpret_cast<T>(external_function_); }
|
||||
|
||||
ExternalReference::Type type() { return type_; }
|
||||
|
||||
static Redirection* Get(Isolate* isolate, void* external_function,
|
||||
ExternalReference::Type type) {
|
||||
Redirection* current = isolate->simulator_redirection();
|
||||
for (; current != nullptr; current = current->next_) {
|
||||
if (current->external_function_ == external_function &&
|
||||
current->type_ == type) {
|
||||
return current;
|
||||
}
|
||||
}
|
||||
return new Redirection(isolate, external_function, type);
|
||||
}
|
||||
|
||||
static Redirection* FromHltInstruction(Instruction* redirect_call) {
|
||||
char* addr_of_hlt = reinterpret_cast<char*>(redirect_call);
|
||||
char* addr_of_redirection =
|
||||
addr_of_hlt - offsetof(Redirection, redirect_call_);
|
||||
return reinterpret_cast<Redirection*>(addr_of_redirection);
|
||||
}
|
||||
|
||||
static void* ReverseRedirection(int64_t reg) {
|
||||
Redirection* redirection =
|
||||
FromHltInstruction(reinterpret_cast<Instruction*>(reg));
|
||||
return redirection->external_function<void*>();
|
||||
}
|
||||
|
||||
static void DeleteChain(Redirection* redirection) {
|
||||
while (redirection != nullptr) {
|
||||
Redirection* next = redirection->next_;
|
||||
delete redirection;
|
||||
redirection = next;
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
void* external_function_;
|
||||
Instruction redirect_call_;
|
||||
ExternalReference::Type type_;
|
||||
Redirection* next_;
|
||||
};
|
||||
|
||||
|
||||
// static
|
||||
void Simulator::TearDown(base::CustomMatcherHashMap* i_cache,
|
||||
Redirection* first) {
|
||||
Redirection::DeleteChain(first);
|
||||
}


// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair structure.
@@ -561,20 +425,20 @@ typedef void (*SimulatorRuntimeProfilingGetterCall)(int64_t arg0, int64_t arg1,
                                                    void* arg2);

void Simulator::DoRuntimeCall(Instruction* instr) {
  Redirection* redirection = Redirection::FromHltInstruction(instr);
  Redirection* redirection = Redirection::FromInstruction(instr);

  // The called C code might itself call simulated code, so any
  // caller-saved registers (including lr) could still be clobbered by a
  // redirected call.
  Instruction* return_address = lr();

  int64_t external = redirection->external_function<int64_t>();
  int64_t external =
      reinterpret_cast<int64_t>(redirection->external_function());

  TraceSim("Call to host function at %p\n",
           redirection->external_function<void*>());
  TraceSim("Call to host function at %p\n", redirection->external_function());

  // SP must be 16-byte-aligned at the call interface.
  bool stack_alignment_exception = ((sp() & 0xf) != 0);
  bool stack_alignment_exception = ((sp() & 0xF) != 0);
  if (stack_alignment_exception) {
    TraceSim("  with unaligned stack 0x%016" PRIx64 ".\n", sp());
    FATAL("ALIGNMENT EXCEPTION");
@@ -761,28 +625,17 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
  set_pc(return_address);
}
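The (sp() & 0xF) != 0 test above follows from the AArch64 calling convention, which requires SP to be 16-byte aligned at a call interface: the low four bits of an aligned address are zero, so alignment reduces to a single mask. A tiny standalone check (the sample addresses are illustrative):

#include <cassert>
#include <cstdint>

// A 16-byte-aligned address has its low four bits clear, exactly the
// condition DoRuntimeCall() checks with (sp() & 0xF) != 0.
bool IsStackAligned16(uint64_t sp) { return (sp & 0xF) == 0; }

int main() {
  assert(IsStackAligned16(0x7FFF0000));
  assert(!IsStackAligned16(0x7FFF0008));
}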


void* Simulator::RedirectExternalReference(Isolate* isolate,
                                           void* external_function,
                                           ExternalReference::Type type) {
  base::LockGuard<base::Mutex> lock_guard(
      isolate->simulator_redirection_mutex());
  Redirection* redirection = Redirection::Get(isolate, external_function, type);
  return redirection->address_of_redirect_call();
}


const char* Simulator::xreg_names[] = {
    "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",
    "x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15",
    "ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
    "x24", "x25", "x26", "cp",  "jssp", "fp", "lr",  "xzr", "csp"};
    "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",  "x8",
    "x9",  "x10", "x11", "x12", "x13", "x14", "x15", "ip0", "ip1",
    "x18", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
    "cp",  "x28", "fp",  "lr",  "xzr", "csp"};

const char* Simulator::wreg_names[] = {
    "w0",  "w1",  "w2",  "w3",  "w4",  "w5",  "w6",  "w7",
    "w8",  "w9",  "w10", "w11", "w12", "w13", "w14", "w15",
    "w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
    "w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
    "w0",  "w1",  "w2",  "w3",  "w4",  "w5",  "w6",  "w7",  "w8",
    "w9",  "w10", "w11", "w12", "w13", "w14", "w15", "w16", "w17",
    "w18", "w19", "w20", "w21", "w22", "w23", "w24", "w25", "w26",
    "wcp", "w28", "wfp", "wlr", "wzr", "wcsp"};

const char* Simulator::sreg_names[] = {
    "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
@@ -1294,9 +1147,9 @@ void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
// a floating-point interpretation or a memory access annotation).
void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) {
  // The template for vector types:
  //   "# v{code}: 0xffeeddccbbaa99887766554433221100".
  //   "# v{code}: 0xFFEEDDCCBBAA99887766554433221100".
  // An example with bytes=4 and lsb=8:
  //   "# v{code}: 0xbbaa9988                ".
  //   "# v{code}: 0xBBAA9988                ".
  fprintf(stream_, "# %s%5s: %s", clr_vreg_name, VRegNameForCode(code),
          clr_vreg_value);

@@ -1393,8 +1246,8 @@ void Simulator::PrintVRegisterFPHelper(unsigned code,
void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
                                       int size_in_bytes) {
  // The template for all supported sizes.
  //   "# x{code}: 0xffeeddccbbaa9988"
  //   "# w{code}: 0xbbaa9988"
  //   "# x{code}: 0xFFEEDDCCBBAA9988"
  //   "# w{code}: 0xBBAA9988"
  //   "# w{code}<15:0>: 0x9988"
  //   "# w{code}<7:0>: 0x88"
  unsigned padding_chars = (kXRegSize - size_in_bytes) * 2;
@@ -2367,8 +2220,8 @@ void Simulator::VisitMoveWideImmediate(Instruction* instr) {
      unsigned reg_code = instr->Rd();
      int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
                                       : wreg(reg_code);
      new_xn_val = (prev_xn_val & ~(0xffffL << shift)) | shifted_imm16;
      break;
      new_xn_val = (prev_xn_val & ~(0xFFFFL << shift)) | shifted_imm16;
      break;
    }
    case MOVZ_w:
    case MOVZ_x: {
@@ -2532,14 +2385,14 @@ static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
  uint64_t u0, v0, w0;
  int64_t u1, v1, w1, w2, t;

  u0 = u & 0xffffffffL;
  u0 = u & 0xFFFFFFFFL;
  u1 = u >> 32;
  v0 = v & 0xffffffffL;
  v0 = v & 0xFFFFFFFFL;
  v1 = v >> 32;

  w0 = u0 * v0;
  t = u1 * v0 + (w0 >> 32);
  w1 = t & 0xffffffffL;
  w1 = t & 0xFFFFFFFFL;
  w2 = t >> 32;
  w1 = u0 * v1 + w1;
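For reference, MultiplyHighSigned() computes the high 64 bits of the 128-bit signed product by splitting each operand into 32-bit limbs and propagating the carries; the hunk elides the final return statement. Below is a self-contained sketch of the complete algorithm, cross-checked against the __int128 compiler extension (GCC/Clang); the completed return line is an assumption based on the standard formulation of this technique:

#include <cassert>
#include <cstdint>

// High 64 bits of the signed 128-bit product, via 32-bit limbs.
int64_t MulHighSigned(int64_t u, int64_t v) {
  uint64_t u0 = u & 0xFFFFFFFFL;
  int64_t u1 = u >> 32;
  uint64_t v0 = v & 0xFFFFFFFFL;
  int64_t v1 = v >> 32;
  uint64_t w0 = u0 * v0;                 // low x low
  int64_t t = u1 * v0 + (w0 >> 32);      // high x low plus carry
  int64_t w1 = t & 0xFFFFFFFFL;
  int64_t w2 = t >> 32;
  w1 = u0 * v1 + w1;                     // low x high
  return u1 * v1 + w2 + (w1 >> 32);      // assumed completion of the hunk
}

int main() {
  int64_t u = -0x123456789ABCDEF0, v = 0x0FEDCBA987654321;
  __int128 full = static_cast<__int128>(u) * v;  // compiler extension
  assert(MulHighSigned(u, v) == static_cast<int64_t>(full >> 64));
}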

@@ -3344,7 +3197,7 @@ void Simulator::Debug() {
        int next_arg = 1;

        if (strcmp(cmd, "stack") == 0) {
          cur = reinterpret_cast<int64_t*>(jssp());
          cur = reinterpret_cast<int64_t*>(sp());

        } else {  // "mem"
          int64_t value;
@@ -3381,7 +3234,7 @@ void Simulator::Debug() {
            PrintF(" (");
            if ((value & kSmiTagMask) == 0) {
              STATIC_ASSERT(kSmiValueSize == 32);
              int32_t untagged = (value >> kSmiShift) & 0xffffffff;
              int32_t untagged = (value >> kSmiShift) & 0xFFFFFFFF;
              PrintF("smi %" PRId32, untagged);
            } else {
              obj->ShortPrint();
@@ -4344,7 +4197,7 @@ void Simulator::VisitNEONByIndexedElement(Instruction* instr) {
  int rm_reg = instr->Rm();
  int index = (instr->NEONH() << 1) | instr->NEONL();
  if (instr->NEONSize() == 1) {
    rm_reg &= 0xf;
    rm_reg &= 0xF;
    index = (index << 1) | instr->NEONM();
  }

@@ -4909,9 +4762,9 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
    case 0x6:
      vform = (q == 1) ? kFormat4S : kFormat2S;
      if (cmode_0 == 0) {
        imm = imm8 << 8 | 0x000000ff;
        imm = imm8 << 8 | 0x000000FF;
      } else {
        imm = imm8 << 16 | 0x0000ffff;
        imm = imm8 << 16 | 0x0000FFFF;
      }
      break;
    case 0x7:
@@ -4923,10 +4776,10 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
        imm = 0;
        for (int i = 0; i < 8; ++i) {
          if (imm8 & (1 << i)) {
            imm |= (UINT64_C(0xff) << (8 * i));
            imm |= (UINT64_C(0xFF) << (8 * i));
          }
        }
      } else {  // cmode_0 == 1, cmode == 0xf.
      } else {  // cmode_0 == 1, cmode == 0xF.
        if (op_bit == 0) {
          vform = q ? kFormat4S : kFormat2S;
          imm = bit_cast<uint32_t>(instr->ImmNEONFP32());
@@ -4934,7 +4787,7 @@ void Simulator::VisitNEONModifiedImmediate(Instruction* instr) {
          vform = kFormat2D;
          imm = bit_cast<uint64_t>(instr->ImmNEONFP64());
        } else {
          DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xf));
          DCHECK((q == 0) && (op_bit == 1) && (cmode == 0xF));
          VisitUnallocated(instr);
        }
      }
@@ -5278,7 +5131,7 @@ void Simulator::VisitNEONScalarByIndexedElement(Instruction* instr) {
  int rm_reg = instr->Rm();
  int index = (instr->NEONH() << 1) | instr->NEONL();
  if (instr->NEONSize() == 1) {
    rm_reg &= 0xf;
    rm_reg &= 0xF;
    index = (index << 1) | instr->NEONM();
  }
156
deps/v8/src/arm64/simulator-arm64.h
vendored
@@ -16,56 +16,13 @@
#define V8_ARM64_SIMULATOR_ARM64_H_
#include "src/assembler.h"
#include "src/base/compiler-specific.h"
#include "src/globals.h"
#include "src/simulator-base.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

#if !defined(USE_SIMULATOR)

// Running without a simulator on a native ARM64 platform.
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
  (entry(p0, p1, p2, p3, p4))

typedef int (*arm64_regexp_matcher)(String* input,
                                    int64_t start_offset,
                                    const byte* input_start,
                                    const byte* input_end,
                                    int* output,
                                    int64_t output_size,
                                    Address stack_base,
                                    int64_t direct_call,
                                    Isolate* isolate);

// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm64_regexp_matcher.
#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
                                   p7, p8)                                     \
  (FUNCTION_CAST<arm64_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7,  \
                                              p8))

// Running without a simulator there is nothing to do.
class SimulatorStack : public v8::internal::AllStatic {
 public:
  static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
                                     uintptr_t c_limit) {
    USE(isolate);
    return c_limit;
  }

  static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
                                     uintptr_t try_catch_address) {
    USE(isolate);
    return try_catch_address;
  }

  static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
    USE(isolate);
  }
};

#else  // !defined(USE_SIMULATOR)
#if defined(USE_SIMULATOR)

// Assemble the specified IEEE-754 components into the target type and apply
// appropriate rounding.
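A binary32 value packs sign (1 bit), biased exponent (8 bits), and mantissa (23 bits); FPRound(), whose body this hunk elides, assembles exactly these components and then rounds. The helper below performs only the packing step for already-rounded components; it is a sketch with illustrative names, not V8's FPRound():

#include <cassert>
#include <cstdint>
#include <cstring>

// Pack IEEE-754 binary32 fields: sign(1) | biased exponent(8) | mantissa(23).
float FloatPack(uint32_t sign, uint32_t biased_exponent, uint32_t mantissa) {
  uint32_t bits = (sign << 31) | (biased_exponent << 23) | mantissa;
  float result;
  std::memcpy(&result, &bits, sizeof(result));  // portable bit_cast
  return result;
}

int main() {
  // 1.0f is sign 0, biased exponent 127, mantissa 0.
  assert(FloatPack(0, 127, 0) == 1.0f);
}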
@@ -269,6 +226,10 @@ T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
  }
}

class CachePage {
  // TODO(all): Simulate instruction cache.
};

// Representation of memory, with typed getters and setters for access.
class SimMemory {
 public:
@@ -680,8 +641,11 @@ class LogicVRegister {
  bool round_[kQRegSize];
};

class Simulator : public DecoderVisitor {
// Using multiple inheritance here is permitted because {DecoderVisitor} is a
// pure interface class with only pure virtual methods.
class Simulator : public DecoderVisitor, public SimulatorBase {
 public:
  static void SetRedirectInstruction(Instruction* instruction);
  static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
                          size_t size) {
    USE(i_cache);
@@ -696,42 +660,7 @@ class Simulator : public DecoderVisitor {

  // System functions.

  static void Initialize(Isolate* isolate);

  static void TearDown(base::CustomMatcherHashMap* i_cache, Redirection* first);

  static Simulator* current(v8::internal::Isolate* isolate);

  class CallArgument;

  // Call an arbitrary function taking an arbitrary number of arguments. The
  // varargs list must be a set of arguments with type CallArgument, and
  // terminated by CallArgument::End().
  void CallVoid(byte* entry, CallArgument* args);

  // Like CallVoid, but expect a return value.
  int64_t CallInt64(byte* entry, CallArgument* args);
  double CallDouble(byte* entry, CallArgument* args);

  // V8 calls into generated JS code with 5 parameters and into
  // generated RegExp code with 10 parameters. These are convenience functions,
  // which set up the simulator state and grab the result on return.
  int64_t CallJS(byte* entry,
                 Object* new_target,
                 Object* target,
                 Object* revc,
                 int64_t argc,
                 Object*** argv);
  int64_t CallRegExp(byte* entry,
                     String* input,
                     int64_t start_offset,
                     const byte* input_start,
                     const byte* input_end,
                     int* output,
                     int64_t output_size,
                     Address stack_base,
                     int64_t direct_call,
                     Isolate* isolate);
  V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);

  // A wrapper class that stores an argument for one of the above Call
  // functions.
@@ -787,6 +716,14 @@ class Simulator : public DecoderVisitor {
    CallArgument() { type_ = NO_ARG; }
  };

  // Call an arbitrary function taking an arbitrary number of arguments.
  template <typename Return, typename... Args>
  Return Call(byte* entry, Args... args) {
    // Convert all arguments to CallArgument.
    CallArgument call_args[] = {CallArgument(args)..., CallArgument::End()};
    CallImpl(entry, call_args);
    return ReadReturn<Return>();
  }
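The templated Call() added above folds the old CallJS/CallRegExp wrappers into one pack expansion: every argument is converted to a wrapper type and the array is terminated by a sentinel. A reduced, self-contained sketch of the same pattern (Arg, End, and CountArgs are illustrative names, not V8's):

#include <cassert>

struct Arg {
  long long bits;
  bool is_end;
  Arg(long long v) : bits(v), is_end(false) {}
  static Arg End() { return Arg(); }
 private:
  Arg() : bits(0), is_end(true) {}
};

template <typename... Args>
int CountArgs(Args... args) {
  // Same shape as `call_args` above: pack expansion plus a sentinel.
  Arg all[] = {Arg(args)..., Arg::End()};
  int n = 0;
  while (!all[n].is_end) n++;
  return n;
}

int main() { assert(CountArgs(1, 2, 3) == 3); }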

  // Start the debugging command line.
  void Debug();
@@ -806,10 +743,6 @@ class Simulator : public DecoderVisitor {

  void ResetState();

  // Runtime call support. Uses the isolate in a thread-safe way.
  static void* RedirectExternalReference(Isolate* isolate,
                                         void* external_function,
                                         ExternalReference::Type type);
  void DoRuntimeCall(Instruction* instr);

  // Run the simulator.
@@ -958,7 +891,6 @@ class Simulator : public DecoderVisitor {
  inline SimVRegister& vreg(unsigned code) { return vregisters_[code]; }

  int64_t sp() { return xreg(31, Reg31IsStackPointer); }
  int64_t jssp() { return xreg(kJSSPCode, Reg31IsStackPointer); }
  int64_t fp() {
    return xreg(kFramePointerRegCode, Reg31IsStackPointer);
  }
@@ -2345,6 +2277,21 @@ class Simulator : public DecoderVisitor {
 private:
  void Init(FILE* stream);

  V8_EXPORT_PRIVATE void CallImpl(byte* entry, CallArgument* args);

  // Read floating point return values.
  template <typename T>
  typename std::enable_if<std::is_floating_point<T>::value, T>::type
  ReadReturn() {
    return static_cast<T>(dreg(0));
  }
  // Read non-float return values.
  template <typename T>
  typename std::enable_if<!std::is_floating_point<T>::value, T>::type
  ReadReturn() {
    return ConvertReturn<T>(xreg(0));
  }

  template <typename T>
  static T FPDefaultNaN();

@@ -2407,40 +2354,7 @@ inline float Simulator::FPDefaultNaN<float>() {
  return kFP32DefaultNaN;
}

// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(isolate, entry, p0, p1, p2, p3, p4) \
  reinterpret_cast<Object*>(Simulator::current(isolate)->CallJS( \
      FUNCTION_ADDR(entry), p0, p1, p2, p3, p4))

#define CALL_GENERATED_REGEXP_CODE(isolate, entry, p0, p1, p2, p3, p4, p5, p6, \
                                   p7, p8)                                     \
  static_cast<int>(Simulator::current(isolate)->CallRegExp( \
      entry, p0, p1, p2, p3, p4, p5, p6, p7, p8))

// The simulator has its own stack. Thus it has a different stack limit from
// the C-based native code. The JS-based limit normally points near the end of
// the simulator stack. When the C-based limit is exhausted we reflect that by
// lowering the JS-based limit as well, to make stack checks trigger.
class SimulatorStack : public v8::internal::AllStatic {
 public:
  static uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
                                     uintptr_t c_limit) {
    return Simulator::current(isolate)->StackLimit(c_limit);
  }

  static uintptr_t RegisterCTryCatch(v8::internal::Isolate* isolate,
                                     uintptr_t try_catch_address) {
    Simulator* sim = Simulator::current(isolate);
    return sim->PushAddress(try_catch_address);
  }

  static void UnregisterCTryCatch(v8::internal::Isolate* isolate) {
    Simulator::current(isolate)->PopAddress();
  }
};

#endif  // !defined(USE_SIMULATOR)
#endif  // defined(USE_SIMULATOR)

}  // namespace internal
}  // namespace v8
4
deps/v8/src/arm64/simulator-logic-arm64.cc
vendored
@@ -3986,9 +3986,9 @@ T Simulator::FPRecipEstimate(T op, FPRounding rounding) {
    } else {
      // Return FPMaxNormal(sign).
      if (sizeof(T) == sizeof(float)) {
        return float_pack(sign, 0xfe, 0x07fffff);
        return float_pack(sign, 0xFE, 0x07FFFFF);
      } else {
        return double_pack(sign, 0x7fe, 0x0fffffffffffffl);
        return double_pack(sign, 0x7FE, 0x0FFFFFFFFFFFFFl);
      }
    }
  } else {
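A spot check of the constants in the FPRecipEstimate() hunk above: biased exponent 0xFE with an all-ones 23-bit mantissa is the largest finite binary32 value, i.e. FLT_MAX. This is a standalone sketch, not V8 code:

#include <cassert>
#include <cfloat>
#include <cstdint>
#include <cstring>

int main() {
  // sign 0, biased exponent 0xFE, mantissa 0x7FFFFF == max normal binary32.
  uint32_t bits = (0u << 31) | (0xFEu << 23) | 0x7FFFFFu;
  float max_normal;
  std::memcpy(&max_normal, &bits, sizeof(max_normal));
  assert(max_normal == FLT_MAX);
}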
4
deps/v8/src/arm64/utils-arm64.cc
vendored
@@ -98,7 +98,7 @@ int CountTrailingZeros(uint64_t value, int width) {
    return static_cast<int>(base::bits::CountTrailingZeros64(value));
  }
  return static_cast<int>(base::bits::CountTrailingZeros32(
      static_cast<uint32_t>(value & 0xfffffffff)));
      static_cast<uint32_t>(value & 0xFFFFFFFFF)));
}


@@ -108,7 +108,7 @@ int CountSetBits(uint64_t value, int width) {
    return static_cast<int>(base::bits::CountPopulation(value));
  }
  return static_cast<int>(
      base::bits::CountPopulation(static_cast<uint32_t>(value & 0xfffffffff)));
      base::bits::CountPopulation(static_cast<uint32_t>(value & 0xFFFFFFFFF)));
}

int LowestSetBitPosition(uint64_t value) {
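A note on the CountTrailingZeros()/CountSetBits() hunks above: the 0xFFFFFFFFF mask (nine Fs) keeps 36 bits, but the subsequent uint32_t cast truncates to 32 bits regardless, so the result is the same as a plain 32-bit truncation. A standalone check using the C++20 <bit> equivalents of the base::bits helpers (assumes a C++20 compiler):

#include <bit>
#include <cassert>
#include <cstdint>

int main() {
  uint64_t value = 0xFF00000001ULL;  // bits above 32 must be ignored
  uint32_t low = static_cast<uint32_t>(value & 0xFFFFFFFFF);
  assert(std::popcount(low) == 1);
  assert(std::countr_zero(low) == 0);
}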
36
deps/v8/src/asmjs/asm-parser.cc
vendored
@@ -292,8 +292,7 @@ void AsmJsParser::Begin(AsmJsScanner::token_t label) {

void AsmJsParser::Loop(AsmJsScanner::token_t label) {
  BareBegin(BlockKind::kLoop, label);
  int position = static_cast<int>(scanner_.Position());
  DCHECK_EQ(position, scanner_.Position());
  size_t position = scanner_.Position();
  current_function_builder_->AddAsmWasmOffset(position, position);
  current_function_builder_->EmitWithU8(kExprLoop, kLocalVoid);
}
@@ -450,7 +449,7 @@ void AsmJsParser::ValidateModuleVar(bool mutable_variable) {
    DeclareGlobal(info, mutable_variable, AsmType::Double(), kWasmF64,
                  WasmInitExpr(dvalue));
  } else if (CheckForUnsigned(&uvalue)) {
    if (uvalue > 0x7fffffff) {
    if (uvalue > 0x7FFFFFFF) {
      FAIL("Numeric literal out of range");
    }
    DeclareGlobal(info, mutable_variable,
@@ -461,7 +460,7 @@ void AsmJsParser::ValidateModuleVar(bool mutable_variable) {
      DeclareGlobal(info, mutable_variable, AsmType::Double(), kWasmF64,
                    WasmInitExpr(-dvalue));
    } else if (CheckForUnsigned(&uvalue)) {
      if (uvalue > 0x7fffffff) {
      if (uvalue > 0x7FFFFFFF) {
        FAIL("Numeric literal out of range");
      }
      DeclareGlobal(info, mutable_variable,
@@ -742,8 +741,7 @@ void AsmJsParser::ValidateFunction() {
  return_type_ = nullptr;

  // Record start of the function, used as position for the stack check.
  int start_position = static_cast<int>(scanner_.Position());
  current_function_builder_->SetAsmFunctionStartPosition(start_position);
  current_function_builder_->SetAsmFunctionStartPosition(scanner_.Position());

  CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
  ValidateFunctionParams(&params);
@@ -902,7 +900,7 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
        current_function_builder_->EmitF64Const(-dvalue);
        current_function_builder_->EmitSetLocal(info->index);
      } else if (CheckForUnsigned(&uvalue)) {
        if (uvalue > 0x7fffffff) {
        if (uvalue > 0x7FFFFFFF) {
          FAIL("Numeric literal out of range");
        }
        info->kind = VarKind::kLocal;
@@ -954,7 +952,7 @@ void AsmJsParser::ValidateFunctionLocals(size_t param_count,
        current_function_builder_->EmitF32Const(dvalue);
        current_function_builder_->EmitSetLocal(info->index);
      } else if (CheckForUnsigned(&uvalue)) {
        if (uvalue > 0x7fffffff) {
        if (uvalue > 0x7FFFFFFF) {
          FAIL("Numeric literal out of range");
        }
        info->kind = VarKind::kLocal;
@@ -1337,7 +1335,7 @@ void AsmJsParser::ValidateCase() {
    FAIL("Expected numeric literal");
  }
  // TODO(bradnelson): Share negation plumbing.
  if ((negate && uvalue > 0x80000000) || (!negate && uvalue > 0x7fffffff)) {
  if ((negate && uvalue > 0x80000000) || (!negate && uvalue > 0x7FFFFFFF)) {
    FAIL("Numeric literal out of range");
  }
  int32_t value = static_cast<int32_t>(uvalue);
@@ -1398,11 +1396,11 @@ AsmType* AsmJsParser::NumericLiteral() {
    current_function_builder_->EmitF64Const(dvalue);
    return AsmType::Double();
  } else if (CheckForUnsigned(&uvalue)) {
    if (uvalue <= 0x7fffffff) {
    if (uvalue <= 0x7FFFFFFF) {
      current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
      return AsmType::FixNum();
    } else {
      DCHECK_LE(uvalue, 0xffffffff);
      DCHECK_LE(uvalue, 0xFFFFFFFF);
      current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
      return AsmType::Unsigned();
    }
@@ -1553,7 +1551,7 @@ AsmType* AsmJsParser::UnaryExpression() {
  if (Check('-')) {
    uint32_t uvalue;
    if (CheckForUnsigned(&uvalue)) {
      // TODO(bradnelson): was supposed to be 0x7fffffff, check errata.
      // TODO(bradnelson): was supposed to be 0x7FFFFFFF, check errata.
      if (uvalue <= 0x80000000) {
        current_function_builder_->EmitI32Const(-static_cast<int32_t>(uvalue));
      } else {
@@ -1621,7 +1619,7 @@ AsmType* AsmJsParser::UnaryExpression() {
    if (!ret->IsA(AsmType::Intish())) {
      FAILn("operator ~ expects intish");
    }
    current_function_builder_->EmitI32Const(0xffffffff);
    current_function_builder_->EmitI32Const(0xFFFFFFFF);
    current_function_builder_->Emit(kExprI32Xor);
    ret = AsmType::Signed();
  }
@@ -2066,8 +2064,8 @@ AsmType* AsmJsParser::ParenthesizedExpression() {
AsmType* AsmJsParser::ValidateCall() {
  AsmType* return_type = call_coercion_;
  call_coercion_ = nullptr;
  int call_pos = static_cast<int>(scanner_.Position());
  int to_number_pos = static_cast<int>(call_coercion_position_);
  size_t call_pos = scanner_.Position();
  size_t to_number_pos = call_coercion_position_;
  bool allow_peek = (call_coercion_deferred_position_ == scanner_.Position());
  AsmJsScanner::token_t function_name = Consume();

@@ -2113,7 +2111,7 @@ AsmType* AsmJsParser::ValidateCall() {
    tmp.emplace(this);
    current_function_builder_->EmitSetLocal(tmp->get());
    // The position of function table calls is after the table lookup.
    call_pos = static_cast<int>(scanner_.Position());
    call_pos = scanner_.Position();
  } else {
    VarInfo* function_info = GetVarInfo(function_name);
    if (function_info->kind == VarKind::kUnused) {
@@ -2176,7 +2174,7 @@ AsmType* AsmJsParser::ValidateCall() {
      (return_type == nullptr || return_type->IsA(AsmType::Float()))) {
    DCHECK_NULL(call_coercion_deferred_);
    call_coercion_deferred_ = AsmType::Signed();
    to_number_pos = static_cast<int>(scanner_.Position());
    to_number_pos = scanner_.Position();
    return_type = AsmType::Signed();
  } else if (return_type == nullptr) {
    to_number_pos = call_pos;  // No conversion.
@@ -2395,9 +2393,9 @@ void AsmJsParser::ValidateHeapAccess() {
    // TODO(bradnelson): Check more things.
    // TODO(mstarzinger): Clarify and explain where this limit is coming from,
    // as it is not mandated by the spec directly.
    if (offset > 0x7fffffff ||
    if (offset > 0x7FFFFFFF ||
        static_cast<uint64_t>(offset) * static_cast<uint64_t>(size) >
            0x7fffffff) {
            0x7FFFFFFF) {
      FAIL("Heap access out of range");
    }
    if (Check(']')) {
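The uvalue checks in NumericLiteral() above encode the asm.js literal boundary: values up to 0x7FFFFFFF are "fixnum" (valid as both signed and unsigned int32), while larger 32-bit values are unsigned only, since their int32 reinterpretation is negative. A minimal sketch of that classification (the enum and function names are illustrative):

#include <cassert>
#include <cstdint>

enum class AsmLiteralType { kFixNum, kUnsigned };

AsmLiteralType ClassifyUnsignedLiteral(uint32_t uvalue) {
  return (uvalue <= 0x7FFFFFFF) ? AsmLiteralType::kFixNum
                                : AsmLiteralType::kUnsigned;
}

int main() {
  assert(ClassifyUnsignedLiteral(0x7FFFFFFF) == AsmLiteralType::kFixNum);
  // As an int32 this is INT32_MIN, so it is only valid as an unsigned value.
  assert(ClassifyUnsignedLiteral(0x80000000) == AsmLiteralType::kUnsigned);
}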
2
deps/v8/src/asmjs/asm-scanner.cc
vendored
@@ -15,7 +15,7 @@ namespace internal {
namespace {
// Cap number of identifiers to ensure we can assign both global and
// local ones a token id in the range of an int32_t.
static const int kMaxIdentifierCount = 0xf000000;
static const int kMaxIdentifierCount = 0xF000000;
};

AsmJsScanner::AsmJsScanner(Utf16CharacterStream* stream)
36
deps/v8/src/assembler.cc
vendored
@@ -131,14 +131,14 @@ static struct V8_ALIGNED(16) {
static struct V8_ALIGNED(16) {
  uint64_t a;
  uint64_t b;
} double_absolute_constant = {V8_UINT64_C(0x7FFFFFFFFFFFFFFF),
                              V8_UINT64_C(0x7FFFFFFFFFFFFFFF)};
} double_absolute_constant = {uint64_t{0x7FFFFFFFFFFFFFFF},
                              uint64_t{0x7FFFFFFFFFFFFFFF}};

static struct V8_ALIGNED(16) {
  uint64_t a;
  uint64_t b;
} double_negate_constant = {V8_UINT64_C(0x8000000000000000),
                            V8_UINT64_C(0x8000000000000000)};
} double_negate_constant = {uint64_t{0x8000000000000000},
                            uint64_t{0x8000000000000000}};

const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
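The V8_UINT64_C to uint64_t{...} change above swaps a macro for a braced literal: the braces pin the constant to exactly uint64_t, and any narrowing inside braces becomes a compile-time error. A minimal illustration (kSignMask is an illustrative name):

#include <cstdint>

constexpr uint64_t kSignMask = uint64_t{0x8000000000000000};
static_assert(kSignMask >> 63 == 1, "top bit set");

int main() { return 0; }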

@@ -351,7 +351,7 @@ void RelocInfo::set_target_address(Isolate* isolate, Address target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
  Assembler::set_target_address_at(isolate, pc_, host_, target,
  Assembler::set_target_address_at(isolate, pc_, constant_pool_, target,
                                   icache_flush_mode);
  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
      IsCodeTarget(rmode_)) {
@@ -801,6 +801,16 @@ ExternalReference ExternalReference::builtins_address(Isolate* isolate) {
  return ExternalReference(isolate->builtins()->builtins_table_address());
}

ExternalReference ExternalReference::handle_scope_implementer_address(
    Isolate* isolate) {
  return ExternalReference(isolate->handle_scope_implementer_address());
}

ExternalReference ExternalReference::pending_microtask_count_address(
    Isolate* isolate) {
  return ExternalReference(isolate->pending_microtask_count_address());
}

ExternalReference ExternalReference::interpreter_dispatch_table_address(
    Isolate* isolate) {
  return ExternalReference(isolate->interpreter()->dispatch_table_address());
@@ -1002,6 +1012,16 @@ ExternalReference ExternalReference::wasm_word64_popcnt(Isolate* isolate) {
      Redirect(isolate, FUNCTION_ADDR(wasm::word64_popcnt_wrapper)));
}

ExternalReference ExternalReference::wasm_word32_rol(Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(wasm::word32_rol_wrapper)));
}

ExternalReference ExternalReference::wasm_word32_ror(Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(wasm::word32_ror_wrapper)));
}

static void f64_acos_wrapper(double* param) {
  WriteDoubleValue(param, base::ieee754::acos(ReadDoubleValue(param)));
}
@@ -1514,6 +1534,12 @@ ExternalReference ExternalReference::runtime_function_table_address(
      const_cast<Runtime::Function*>(Runtime::RuntimeFunctionTable(isolate)));
}

ExternalReference ExternalReference::invalidate_prototype_chains_function(
    Isolate* isolate) {
  return ExternalReference(
      Redirect(isolate, FUNCTION_ADDR(JSObject::InvalidatePrototypeChains)));
}

double power_helper(Isolate* isolate, double x, double y) {
  int y_int = static_cast<int>(y);
  if (y == y_int) {
28
deps/v8/src/assembler.h
vendored
@@ -36,6 +36,7 @@
#define V8_ASSEMBLER_H_

#include <forward_list>
#include <iosfwd>

#include "src/allocation.h"
#include "src/builtins/builtins.h"
@@ -54,9 +55,6 @@ namespace v8 {
class ApiFunction;

namespace internal {
namespace wasm {
class WasmCode;
}

// Forward declarations.
class Isolate;
@@ -486,6 +484,7 @@ class RelocInfo {
  Mode rmode() const { return rmode_; }
  intptr_t data() const { return data_; }
  Code* host() const { return host_; }
  Address constant_pool() const { return constant_pool_; }

  // Apply a relocation by delta bytes. When the code object is moved, PC
  // relative addresses have to be updated as well as absolute addresses
@@ -625,9 +624,6 @@ class RelocInfo {
  byte* pc_;
  Mode rmode_;
  intptr_t data_;
  // TODO(mtrofin): try remove host_, if all we need is the constant_pool_ or
  // other few attributes, like start address, etc. This is so that we can reuse
  // RelocInfo for WasmCode without having a modal design.
  Code* host_;
  Address constant_pool_ = nullptr;
  friend class RelocIterator;
@@ -830,6 +826,9 @@ class ExternalReference BASE_EMBEDDED {
  // The builtins table as an external reference, used by lazy deserialization.
  static ExternalReference builtins_address(Isolate* isolate);

  static ExternalReference handle_scope_implementer_address(Isolate* isolate);
  static ExternalReference pending_microtask_count_address(Isolate* isolate);

  // One-of-a-kind references. These references are not part of a general
  // pattern. This means that they have to be added to the
  // ExternalReferenceTable in serialize.cc manually.
@@ -875,6 +874,8 @@ class ExternalReference BASE_EMBEDDED {
  static ExternalReference wasm_word64_ctz(Isolate* isolate);
  static ExternalReference wasm_word32_popcnt(Isolate* isolate);
  static ExternalReference wasm_word64_popcnt(Isolate* isolate);
  static ExternalReference wasm_word32_rol(Isolate* isolate);
  static ExternalReference wasm_word32_ror(Isolate* isolate);
  static ExternalReference wasm_float64_pow(Isolate* isolate);
  static ExternalReference wasm_set_thread_in_wasm_flag(Isolate* isolate);
  static ExternalReference wasm_clear_thread_in_wasm_flag(Isolate* isolate);
@@ -1019,6 +1020,9 @@ class ExternalReference BASE_EMBEDDED {
  V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
      Isolate* isolate);

  static ExternalReference invalidate_prototype_chains_function(
      Isolate* isolate);

  Address address() const { return reinterpret_cast<Address>(address_); }

  // Used to read out the last step action of the debugger.
@@ -1328,16 +1332,24 @@ class RegisterBase {

  int bit() const { return 1 << code(); }

  inline bool operator==(SubType other) const {
  inline constexpr bool operator==(SubType other) const {
    return reg_code_ == other.reg_code_;
  }
  inline bool operator!=(SubType other) const { return !(*this == other); }
  inline constexpr bool operator!=(SubType other) const {
    return reg_code_ != other.reg_code_;
  }

 protected:
  explicit constexpr RegisterBase(int code) : reg_code_(code) {}
  int reg_code_;
};
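Making the comparisons constexpr, as this hunk does for RegisterBase, lets register equality be evaluated at compile time. A reduced illustration (Reg is an illustrative stand-in, not V8's class):

#include <cassert>

class Reg {
 public:
  explicit constexpr Reg(int code) : code_(code) {}
  constexpr bool operator==(Reg other) const { return code_ == other.code_; }
  constexpr bool operator!=(Reg other) const { return code_ != other.code_; }
 private:
  int code_;
};

constexpr Reg kR0{0};
constexpr Reg kR1{1};
// Evaluated entirely at compile time thanks to constexpr operator!=.
static_assert(kR0 != kR1, "distinct register codes compare unequal");

int main() { assert(kR0 == Reg{0}); }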

template <typename SubType, int kAfterLastRegister>
inline std::ostream& operator<<(std::ostream& os,
                                RegisterBase<SubType, kAfterLastRegister> reg) {
  return reg.is_valid() ? os << "r" << reg.code() : os << "<invalid reg>";
}

}  // namespace internal
}  // namespace v8
#endif  // V8_ASSEMBLER_H_
26
deps/v8/src/ast/ast-numbering.cc
vendored
@@ -16,10 +16,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
 public:
  AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
                      Compiler::EagerInnerFunctionLiterals* eager_literals)
      : zone_(zone),
        eager_literals_(eager_literals),
        suspend_count_(0),
        dont_optimize_reason_(kNoReason) {
      : zone_(zone), eager_literals_(eager_literals), suspend_count_(0) {
    InitializeAstVisitor(stack_limit);
  }

@@ -39,19 +36,12 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
  void VisitArguments(ZoneList<Expression*>* arguments);
  void VisitLiteralProperty(LiteralProperty* property);

  void DisableOptimization(BailoutReason reason) {
    dont_optimize_reason_ = reason;
  }

  BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }

  Zone* zone() const { return zone_; }

  Zone* zone_;
  Compiler::EagerInnerFunctionLiterals* eager_literals_;
  int suspend_count_;
  FunctionKind function_kind_;
  BailoutReason dont_optimize_reason_;

  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
  DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
@@ -80,7 +70,6 @@ void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {

void AstNumberingVisitor::VisitNativeFunctionLiteral(
    NativeFunctionLiteral* node) {
  DisableOptimization(kNativeFunctionLiteral);
}

void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
@@ -206,6 +195,11 @@ void AstNumberingVisitor::VisitProperty(Property* node) {
  Visit(node->obj());
}

void AstNumberingVisitor::VisitResolvedProperty(ResolvedProperty* node) {
  Visit(node->object());
  Visit(node->property());
}

void AstNumberingVisitor::VisitAssignment(Assignment* node) {
  Visit(node->target());
  Visit(node->value());
@@ -262,6 +256,7 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {

void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
  Visit(node->assign_iterator());  // Not part of loop.
  Visit(node->assign_next());
  node->set_first_suspend_id(suspend_count_);
  Visit(node->next_result());
  Visit(node->result_done());
@@ -326,11 +321,6 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
  for (int i = 0; i < node->properties()->length(); i++) {
    VisitLiteralProperty(node->properties()->at(i));
  }
  node->InitDepthAndFlags();
  // Mark all computed expressions that are bound to a key that
  // is shadowed by a later occurrence of the same key. For the
  // marked expressions, no store code is emitted.
  node->CalculateEmitStore(zone_);
}

void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
@@ -342,7 +332,6 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
  for (int i = 0; i < node->values()->length(); i++) {
    Visit(node->values()->at(i));
  }
  node->InitDepthAndFlags();
}

void AstNumberingVisitor::VisitCall(Call* node) {
@@ -402,7 +391,6 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
  VisitDeclarations(scope->declarations());
  VisitStatements(node->body());

  node->set_dont_optimize_reason(dont_optimize_reason());
  node->set_suspend_count(suspend_count_);

  return !HasStackOverflow();
9
deps/v8/src/ast/ast-traversal-visitor.h
vendored
@@ -243,6 +243,7 @@ void AstTraversalVisitor<Subclass>::VisitForStatement(ForStatement* stmt) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitForInStatement(ForInStatement* stmt) {
  PROCESS_NODE(stmt);
  RECURSE(Visit(stmt->each()));
  RECURSE(Visit(stmt->enumerable()));
  RECURSE(Visit(stmt->body()));
}
@@ -391,6 +392,14 @@ void AstTraversalVisitor<Subclass>::VisitProperty(Property* expr) {
  RECURSE_EXPRESSION(Visit(expr->key()));
}

template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitResolvedProperty(
    ResolvedProperty* expr) {
  PROCESS_EXPRESSION(expr);
  RECURSE_EXPRESSION(VisitVariableProxy(expr->object()));
  RECURSE_EXPRESSION(VisitVariableProxy(expr->property()));
}

template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCall(Call* expr) {
  PROCESS_EXPRESSION(expr);
21
deps/v8/src/ast/ast.cc
vendored
@@ -514,18 +514,17 @@ bool ArrayLiteral::is_empty() const {
}

int ArrayLiteral::InitDepthAndFlags() {
  DCHECK_LT(first_spread_index_, 0);
  if (is_initialized()) return depth();

  int constants_length = values()->length();
  int constants_length =
      first_spread_index_ >= 0 ? first_spread_index_ : values()->length();

  // Fill in the literals.
  bool is_simple = true;
  bool is_simple = first_spread_index_ < 0;
  int depth_acc = 1;
  int array_index = 0;
  for (; array_index < constants_length; array_index++) {
    Expression* element = values()->at(array_index);
    DCHECK(!element->IsSpread());
    MaterializedLiteral* literal = element->AsMaterializedLiteral();
    if (literal != nullptr) {
      int subliteral_depth = literal->InitDepthAndFlags() + 1;
@@ -546,11 +545,10 @@ int ArrayLiteral::InitDepthAndFlags() {
}

void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
  DCHECK_LT(first_spread_index_, 0);

  if (!constant_elements_.is_null()) return;

  int constants_length = values()->length();
  int constants_length =
      first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
  ElementsKind kind = FIRST_FAST_ELEMENTS_KIND;
  Handle<FixedArray> fixed_array =
      isolate->factory()->NewFixedArrayWithHoles(constants_length);
@@ -614,11 +612,6 @@ bool ArrayLiteral::IsFastCloningSupported() const {
         ConstructorBuiltins::kMaximumClonedShallowArrayElements;
}

void ArrayLiteral::RewindSpreads() {
  values_->Rewind(first_spread_index_);
  first_spread_index_ = -1;
}

bool MaterializedLiteral::IsSimple() const {
  if (IsArrayLiteral()) return AsArrayLiteral()->is_simple();
  if (IsObjectLiteral()) return AsObjectLiteral()->is_simple();
@@ -812,6 +805,10 @@ Call::CallType Call::GetCallType() const {
    }
  }

  if (expression()->IsResolvedProperty()) {
    return RESOLVED_PROPERTY_CALL;
  }

  return OTHER_CALL;
}
71
deps/v8/src/ast/ast.h
vendored
@@ -94,6 +94,7 @@ namespace internal {
  V(Literal)                    \
  V(NativeFunctionLiteral)      \
  V(Property)                   \
  V(ResolvedProperty)           \
  V(RewritableExpression)       \
  V(Spread)                     \
  V(SuperCallReference)         \
@@ -590,11 +591,13 @@ class ForInStatement final : public ForEachStatement {
class ForOfStatement final : public ForEachStatement {
 public:
  void Initialize(Statement* body, Variable* iterator,
                  Expression* assign_iterator, Expression* next_result,
                  Expression* result_done, Expression* assign_each) {
                  Expression* assign_iterator, Expression* assign_next,
                  Expression* next_result, Expression* result_done,
                  Expression* assign_each) {
    ForEachStatement::Initialize(body);
    iterator_ = iterator;
    assign_iterator_ = assign_iterator;
    assign_next_ = assign_next;
    next_result_ = next_result;
    result_done_ = result_done;
    assign_each_ = assign_each;
@@ -609,6 +612,9 @@ class ForOfStatement final : public ForEachStatement {
    return assign_iterator_;
  }

  // iteratorRecord.next = iterator.next
  Expression* assign_next() const { return assign_next_; }

  // result = iterator.next()  // with type check
  Expression* next_result() const {
    return next_result_;
@@ -624,6 +630,12 @@ class ForOfStatement final : public ForEachStatement {
    return assign_each_;
  }

  void set_assign_iterator(Expression* e) { assign_iterator_ = e; }
  void set_assign_next(Expression* e) { assign_next_ = e; }
  void set_next_result(Expression* e) { next_result_ = e; }
  void set_result_done(Expression* e) { result_done_ = e; }
  void set_assign_each(Expression* e) { assign_each_ = e; }

 private:
  friend class AstNodeFactory;

@@ -637,6 +649,7 @@ class ForOfStatement final : public ForEachStatement {

  Variable* iterator_;
  Expression* assign_iterator_;
  Expression* assign_next_;
  Expression* next_result_;
  Expression* result_done_;
  Expression* assign_each_;
@@ -1450,22 +1463,23 @@ class ArrayLiteral final : public AggregateLiteral {
  }

  // Provide a mechanism for iterating through values to rewrite spreads.
  ZoneList<Expression*>::iterator FirstSpread() const {
  ZoneList<Expression*>::iterator FirstSpreadOrEndValue() const {
    return (first_spread_index_ >= 0) ? values_->begin() + first_spread_index_
                                      : values_->end();
  }
  ZoneList<Expression*>::iterator BeginValue() const {
    return values_->begin();
  }
  ZoneList<Expression*>::iterator EndValue() const { return values_->end(); }

  // Rewind an array literal omitting everything from the first spread on.
  void RewindSpreads();

 private:
  friend class AstNodeFactory;

  ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index, int pos)
      : AggregateLiteral(pos, kArrayLiteral),
        first_spread_index_(first_spread_index),
        values_(values) {}
        values_(values) {
  }

  int first_spread_index_;
  Handle<ConstantElementsPair> constant_elements_;
@@ -1606,6 +1620,25 @@ class Property final : public Expression {
  Expression* key_;
};

// ResolvedProperty pairs a receiver field with a value field. It allows Call
// to support arbitrary receivers while still taking advantage of TypeFeedback.
class ResolvedProperty final : public Expression {
 public:
  VariableProxy* object() const { return object_; }
  VariableProxy* property() const { return property_; }

  void set_object(VariableProxy* e) { object_ = e; }
  void set_property(VariableProxy* e) { property_ = e; }

 private:
  friend class AstNodeFactory;

  ResolvedProperty(VariableProxy* obj, VariableProxy* property, int pos)
      : Expression(pos, kResolvedProperty), object_(obj), property_(property) {}

  VariableProxy* object_;
  VariableProxy* property_;
};

class Call final : public Expression {
 public:
@@ -1632,6 +1665,7 @@ class Call final : public Expression {
    NAMED_SUPER_PROPERTY_CALL,
    KEYED_SUPER_PROPERTY_CALL,
    SUPER_CALL,
    RESOLVED_PROPERTY_CALL,
    OTHER_CALL
  };

@@ -1697,11 +1731,10 @@ class CallNew final : public Expression {
  ZoneList<Expression*>* arguments_;
};


// The CallRuntime class does not represent any official JavaScript
// language construct. Instead it is used to call a C or JS function
// with a set of arguments. This is used from the builtins that are
// implemented in JavaScript (see "v8natives.js").
// implemented in JavaScript.
class CallRuntime final : public Expression {
 public:
  ZoneList<Expression*>* arguments() const { return arguments_; }
@@ -2104,7 +2137,6 @@ class YieldStar final : public Suspend {
  // - One for awaiting the iterator result yielded by the delegated iterator
  //   (await_delegated_iterator_output_suspend_id)
  int await_iterator_close_suspend_id() const {
    DCHECK_NE(-1, await_iterator_close_suspend_id_);
    return await_iterator_close_suspend_id_;
  }
  void set_await_iterator_close_suspend_id(int id) {
@@ -2112,7 +2144,6 @@ class YieldStar final : public Suspend {
  }

  int await_delegated_iterator_output_suspend_id() const {
    DCHECK_NE(-1, await_delegated_iterator_output_suspend_id_);
    return await_delegated_iterator_output_suspend_id_;
  }
  void set_await_delegated_iterator_output_suspend_id(int id) {
@@ -2168,7 +2199,8 @@ class FunctionLiteral final : public Expression {
    kAnonymousExpression,
    kNamedExpression,
    kDeclaration,
    kAccessorOrMethod
    kAccessorOrMethod,
    kWrapped,
  };

  enum IdType { kIdTypeInvalid = -1, kIdTypeTopLevel = 0 };
@@ -2199,6 +2231,7 @@ class FunctionLiteral final : public Expression {
  bool is_anonymous_expression() const {
    return function_type() == kAnonymousExpression;
  }
  bool is_wrapped() const { return function_type() == kWrapped; }
  LanguageMode language_mode() const;

  static bool NeedsHomeObject(Expression* expr);
@@ -2274,7 +2307,9 @@ class FunctionLiteral final : public Expression {
  }
  FunctionKind kind() const;

  bool dont_optimize() { return dont_optimize_reason() != kNoReason; }
  bool dont_optimize() {
    return dont_optimize_reason() != BailoutReason::kNoReason;
  }
  BailoutReason dont_optimize_reason() {
    return DontOptimizeReasonField::decode(bit_field_);
  }
@@ -2337,14 +2372,14 @@ class FunctionLiteral final : public Expression {
                  Pretenure::encode(false) |
                  HasDuplicateParameters::encode(has_duplicate_parameters ==
                                                 kHasDuplicateParameters) |
                  DontOptimizeReasonField::encode(kNoReason) |
                  DontOptimizeReasonField::encode(BailoutReason::kNoReason) |
                  RequiresInstanceFieldsInitializer::encode(false);
    if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
    DCHECK_EQ(body == nullptr, expected_property_count < 0);
  }

  class FunctionTypeBits
      : public BitField<FunctionType, Expression::kNextBitFieldIndex, 2> {};
      : public BitField<FunctionType, Expression::kNextBitFieldIndex, 3> {};
  class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
  class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
  class DontOptimizeReasonField
@@ -2993,6 +3028,12 @@ class AstNodeFactory final BASE_EMBEDDED {
    return new (zone_) Property(obj, key, pos);
  }

  ResolvedProperty* NewResolvedProperty(VariableProxy* obj,
                                        VariableProxy* property,
                                        int pos = kNoSourcePosition) {
    return new (zone_) ResolvedProperty(obj, property, pos);
  }

  Call* NewCall(Expression* expression, ZoneList<Expression*>* arguments,
                int pos, Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
    return new (zone_) Call(expression, arguments, pos, possibly_eval);
43
deps/v8/src/ast/prettyprinter.cc
vendored
@@ -26,6 +26,7 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
  is_iterator_error_ = false;
  is_async_iterator_error_ = false;
  is_user_js_ = is_user_js;
  function_kind_ = kNormalFunction;
  InitializeAstVisitor(isolate);
}

@@ -187,7 +188,10 @@ void CallPrinter::VisitDebuggerStatement(DebuggerStatement* node) {}


void CallPrinter::VisitFunctionLiteral(FunctionLiteral* node) {
  FunctionKind last_function_kind = function_kind_;
  function_kind_ = node->kind();
  FindStatements(node->body());
  function_kind_ = last_function_kind;
}


@@ -250,7 +254,17 @@ void CallPrinter::VisitArrayLiteral(ArrayLiteral* node) {
  Print("[");
  for (int i = 0; i < node->values()->length(); i++) {
    if (i != 0) Print(",");
    Find(node->values()->at(i), true);
    Expression* subexpr = node->values()->at(i);
    Spread* spread = subexpr->AsSpread();
    if (spread != nullptr && !found_ &&
        position_ == spread->expression()->position()) {
      found_ = true;
      is_iterator_error_ = true;
      Find(spread->expression(), true);
      done_ = true;
      return;
    }
    Find(subexpr, true);
  }
  Print("]");
}
@@ -277,7 +291,17 @@ void CallPrinter::VisitCompoundAssignment(CompoundAssignment* node) {

void CallPrinter::VisitYield(Yield* node) { Find(node->expression()); }

void CallPrinter::VisitYieldStar(YieldStar* node) { Find(node->expression()); }
void CallPrinter::VisitYieldStar(YieldStar* node) {
  if (!found_ && position_ == node->expression()->position()) {
    found_ = true;
    if (IsAsyncFunction(function_kind_))
      is_async_iterator_error_ = true;
    else
      is_iterator_error_ = true;
    Print("yield* ");
  }
  Find(node->expression());
}

void CallPrinter::VisitAwait(Await* node) { Find(node->expression()); }

@@ -302,6 +326,7 @@ void CallPrinter::VisitProperty(Property* node) {
  }
}

void CallPrinter::VisitResolvedProperty(ResolvedProperty* node) {}

void CallPrinter::VisitCall(Call* node) {
  bool was_found = false;
@@ -960,8 +985,10 @@ void AstPrinter::VisitTryCatchStatement(TryCatchStatement* node) {
      UNREACHABLE();
  }
  Print(" %s\n", prediction);
  PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
                               node->scope()->catch_variable()->raw_name());
  if (node->scope()) {
    PrintLiteralWithModeIndented("CATCHVAR", node->scope()->catch_variable(),
                                 node->scope()->catch_variable()->raw_name());
  }
  PrintIndentedVisit("CATCH", node->catch_block());
}

@@ -1223,6 +1250,14 @@ void AstPrinter::VisitProperty(Property* node) {
  }
}

void AstPrinter::VisitResolvedProperty(ResolvedProperty* node) {
  EmbeddedVector<char, 128> buf;
  SNPrintF(buf, "RESOLVED-PROPERTY");
  IndentedScope indent(this, buf.start(), node->position());

  PrintIndentedVisit("RECEIVER", node->object());
  PrintIndentedVisit("PROPERTY", node->property());
}

void AstPrinter::VisitCall(Call* node) {
  EmbeddedVector<char, 128> buf;
1
deps/v8/src/ast/prettyprinter.h
vendored
@@ -50,6 +50,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
  bool is_iterator_error_;
  bool is_async_iterator_error_;
  bool is_call_error_;
  FunctionKind function_kind_;
  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();

 protected:
26
deps/v8/src/ast/scopes.cc
vendored
@@ -147,8 +147,6 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
  DCHECK_NE(SCRIPT_SCOPE, scope_type);
  SetDefaults();
  set_language_mode(outer_scope->language_mode());
  force_context_allocation_ =
      !is_function_scope() && outer_scope->has_forced_context_allocation();
  outer_scope_->AddInnerScope(this);
}

@@ -649,8 +647,8 @@ void DeclarationScope::Analyze(ParseInfo* info) {
  RuntimeCallTimerScope runtimeTimer(
      info->runtime_call_stats(),
      info->on_background_thread()
          ? &RuntimeCallStats::CompileBackgroundScopeAnalysis
          : &RuntimeCallStats::CompileScopeAnalysis);
          ? RuntimeCallCounterId::kCompileBackgroundScopeAnalysis
          : RuntimeCallCounterId::kCompileScopeAnalysis);
  DCHECK_NOT_NULL(info->literal());
  DeclarationScope* scope = info->literal()->scope();

@@ -1370,12 +1368,8 @@ bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
    if (s->is_catch_scope()) continue;
    // With scopes do not introduce variables that need allocation.
    if (s->is_with_scope()) continue;
    // Module scopes context-allocate all variables, and have no
    // {this} or {arguments} variables whose existence depends on
    // references to them.
    if (s->is_module_scope()) continue;
    // Only block scopes and function scopes should disallow preparsing.
    DCHECK(s->is_block_scope() || s->is_function_scope());
    DCHECK(s->is_module_scope() || s->is_block_scope() ||
           s->is_function_scope());
    return false;
  }
  return true;
@@ -1443,6 +1437,10 @@ bool Scope::NeedsScopeInfo() const {
  return NeedsContext();
}

bool Scope::ShouldBanArguments() {
  return GetReceiverScope()->should_ban_arguments();
}

DeclarationScope* Scope::GetReceiverScope() {
  Scope* scope = this;
  while (!scope->is_script_scope() &&
@@ -1734,9 +1732,6 @@ void Scope::Print(int n) {
    if (scope->was_lazily_parsed()) Indent(n1, "// lazily parsed\n");
    if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n");
  }
  if (has_forced_context_allocation()) {
    Indent(n1, "// forces context allocation\n");
  }
  if (num_stack_slots_ > 0) {
    Indent(n1, "// ");
    PrintF("%d stack slots\n", num_stack_slots_);
@@ -2111,11 +2106,8 @@ bool Scope::MustAllocateInContext(Variable* var) {
  // an eval() call or a runtime with lookup), it must be allocated in the
  // context.
  //
  // Exceptions: If the scope as a whole has forced context allocation, all
  // variables will have context allocation, even temporaries. Otherwise
  // temporary variables are always stack-allocated. Catch-bound variables are
  // Temporary variables are always stack-allocated. Catch-bound variables are
  // always context-allocated.
  if (has_forced_context_allocation()) return true;
  if (var->mode() == TEMPORARY) return false;
  if (is_catch_scope()) return true;
  if ((is_script_scope() || is_eval_scope()) &&
14
deps/v8/src/ast/scopes.h
vendored
@@ -334,14 +334,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
  bool is_hidden() const { return is_hidden_; }
  void set_is_hidden() { is_hidden_ = true; }

  // In some cases we want to force context allocation for a whole scope.
  void ForceContextAllocation() {
    DCHECK(!already_resolved_);
    force_context_allocation_ = true;
  }
  bool has_forced_context_allocation() const {
    return force_context_allocation_;
  }
  void ForceContextAllocationForParameters() {
    DCHECK(!already_resolved_);
    force_context_allocation_for_parameters_ = true;
@@ -404,6 +396,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
    return static_cast<Variable*>(variables_.Start()->value);
  }

  bool ShouldBanArguments();

  // ---------------------------------------------------------------------------
  // Variable allocation.

@@ -704,6 +698,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
  bool asm_module() const { return asm_module_; }
  void set_asm_module();

  bool should_ban_arguments() const {
    return IsClassFieldsInitializerFunction(function_kind());
  }

  void DeclareThis(AstValueFactory* ast_value_factory);
  void DeclareArguments(AstValueFactory* ast_value_factory);
  void DeclareDefaultFunctionVariables(AstValueFactory* ast_value_factory);
21
deps/v8/src/bailout-reason.cc
vendored
@ -8,13 +8,24 @@
namespace v8 {
namespace internal {

const char* GetBailoutReason(BailoutReason reason) {
DCHECK_LT(reason, kLastErrorMessage);
#define ERROR_MESSAGES_TEXTS(C, T) T,

const char* GetBailoutReason(BailoutReason reason) {
DCHECK_LT(reason, BailoutReason::kLastErrorMessage);
DCHECK_GE(reason, BailoutReason::kNoReason);
static const char* error_messages_[] = {
ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
#undef ERROR_MESSAGES_TEXTS
return error_messages_[reason];
BAILOUT_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
return error_messages_[static_cast<int>(reason)];
}

const char* GetAbortReason(AbortReason reason) {
DCHECK_LT(reason, AbortReason::kLastErrorMessage);
DCHECK_GE(reason, AbortReason::kNoReason);
static const char* error_messages_[] = {
ABORT_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
return error_messages_[static_cast<int>(reason)];
}

#undef ERROR_MESSAGES_TEXTS
} // namespace internal
} // namespace v8
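The lookup above is the classic X-macro pattern: one list macro is expanded once into enum constants and once into an index-aligned string table, so the two can never drift apart. A minimal standalone sketch of the same pattern (names here are illustrative, not V8's):

  #include <cstdio>

  #define MY_MESSAGES_LIST(V)         \
    V(kNoReason, "no reason")         \
    V(kSomethingBad, "something bad")

  #define DECLARE_CONSTANT(C, T) C,
  enum class MyReason { MY_MESSAGES_LIST(DECLARE_CONSTANT) kLastMessage };
  #undef DECLARE_CONSTANT

  const char* GetMyReason(MyReason reason) {
  #define DECLARE_TEXT(C, T) T,
    // Index-aligned with the enum because both expand the same list macro.
    static const char* messages[] = {MY_MESSAGES_LIST(DECLARE_TEXT)};
  #undef DECLARE_TEXT
    return messages[static_cast<int>(reason)];
  }

  int main() { std::printf("%s\n", GetMyReason(MyReason::kSomethingBad)); }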
125
deps/v8/src/bailout-reason.h
vendored
@ -8,158 +8,117 @@
namespace v8 {
namespace internal {

// TODO(svenpanne) introduce an AbortReason and partition this list
#define ERROR_MESSAGES_LIST(V) \
#define ABORT_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
V(kClassConstructorFunction, "Class constructor function") \
V(kClassLiteral, "Class literal") \
V(kCodeGenerationFailed, "Code generation failed") \
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
V(kComputedPropertyName, "Computed property name") \
V(kContextAllocatedArguments, "Context-allocated arguments") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
V(kDeclarationInWithContext, "Declaration in with context") \
V(kDynamicImport, "Dynamic module import") \
V(kCyclicObjectStateDetectedInEscapeAnalysis, \
"Cyclic object state detected by escape analysis") \
V(kEval, "eval") \
V(kExpectedAllocationSite, "Expected allocation site") \
V(kExpectedBooleanValue, "Expected boolean value") \
V(kExpectedFeedbackVector, "Expected feedback vector") \
V(kExpectedHeapNumber, "Expected HeapNumber") \
V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
V(kExpectedOptimizationSentinel, \
"Expected optimized code cell or optimization sentinel") \
V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
V(kForOfStatement, "ForOfStatement") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kFunctionCallsEval, "Function calls eval") \
V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \
"The function_data field should be a BytecodeArray on interpreter entry") \
V(kGenerator, "Generator") \
V(kGetIterator, "GetIterator") \
V(kGraphBuildingFailed, "Optimized graph construction failed") \
V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
V(kIndexIsNegative, "Index is negative") \
V(kIndexIsTooLarge, "Index is too large") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
V(kInputStringTooLong, "Input string too long") \
V(kInvalidBytecode, "Invalid bytecode") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
V(kLiveEdit, "LiveEdit") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kNoCasesLeft, "No cases left") \
V(kNonObject, "Non-object value") \
V(kNotEnoughVirtualRegistersRegalloc, \
"Not enough virtual registers (regalloc)") \
V(kObjectNotTagged, "The object is not tagged") \
V(kObjectTagged, "The object is tagged") \
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsASmiAndNotABoundFunction, \
"Operand is a smi and not a bound function") \
V(kOperandIsASmiAndNotAFixedArray, "Operand is a smi and not a fixed array") \
V(kOperandIsASmiAndNotAFunction, "Operand is a smi and not a function") \
V(kOperandIsASmiAndNotAGeneratorObject, \
"Operand is a smi and not a generator object") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsNotABoundFunction, "Operand is not a bound function") \
V(kOperandIsNotAFixedArray, "Operand is not a fixed array") \
V(kOperandIsNotAFunction, "Operand is not a function") \
V(kOperandIsNotAGeneratorObject, "Operand is not a generator object") \
V(kOperandIsNotASmi, "Operand is not a smi") \
V(kOperandIsNotSmi, "Operand is not smi") \
V(kObjectTagged, "The object is tagged") \
V(kObjectNotTagged, "The object is not tagged") \
V(kOptimizationDisabled, "Optimization disabled") \
V(kOptimizationDisabledForTest, "Optimization disabled for test") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
"Reference to a variable which requires dynamic lookup") \
V(kReferenceToModuleVariable, "Reference to module-allocated variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
V(kRestParameter, "Rest parameters") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
V(kSpreadCall, "Call with spread argument") \
V(kShouldNotDirectlyEnterOsrFunction, \
"Should not directly enter OSR-compiled function") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSuperReference, "Super reference") \
V(kTailCall, "Tail call") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
"The stack was corrupted by MacroAssembler::Call()") \
V(kTooManyParameters, "Too many parameters") \
V(kTryCatchStatement, "TryCatchStatement") \
V(kTryFinallyStatement, "TryFinallyStatement") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
"Unexpected fallthrough from CharCodeAt slow case") \
V(kUnexpectedFallThroughFromStringComparison, \
"Unexpected fall-through from string comparison") \
V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
"Unexpected fallthrough to CharCodeAt slow case") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedFunctionIDForInvokeIntrinsic, \
"Unexpected runtime function id for the InvokeIntrinsic bytecode") \
V(kUnexpectedInitialMapForArrayFunction, \
"Unexpected initial map for Array function") \
V(kUnexpectedInitialMapForArrayFunction1, \
"Unexpected initial map for Array function (1)") \
V(kUnexpectedInitialMapForArrayFunction2, \
"Unexpected initial map for Array function (2)") \
V(kUnexpectedInitialMapForArrayFunction, \
"Unexpected initial map for Array function") \
V(kUnexpectedInitialMapForInternalArrayFunction, \
"Unexpected initial map for InternalArray function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedFunctionIDForInvokeIntrinsic, \
"Unexpected runtime function id for the InvokeIntrinsic bytecode") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedStackDepth, "Unexpected operand stack depth in full-codegen") \
V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedValue, "Unexpected value") \
V(kUnsupportedModuleOperation, "Unsupported module operation") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kUnexpectedReturnFromFrameDropper, \
"Unexpectedly returned from dropping frames") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
V(kVariableResolvedToWithContext, "Variable resolved to with context") \
V(kWithStatement, "WithStatement") \
V(kWrongFunctionContext, "Wrong context passed to function") \
V(kUnexpectedReturnFromWasmTrap, \
"Should not return after throwing a wasm trap") \
V(kUnexpectedStackPointer, "The stack pointer is not the expected value") \
V(kUnexpectedValue, "Unexpected value") \
V(kUnsupportedModuleOperation, "Unsupported module operation") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
V(kWrongArgumentCountForInvokeIntrinsic, \
"Wrong number of arguments for intrinsic") \
V(kShouldNotDirectlyEnterOsrFunction, \
"Should not directly enter OSR-compiled function") \
V(kUnexpectedReturnFromWasmTrap, \
"Should not return after throwing a wasm trap")
V(kWrongFunctionContext, "Wrong context passed to function")

#define BAILOUT_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
\
V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
V(kCodeGenerationFailed, "Code generation failed") \
V(kCyclicObjectStateDetectedInEscapeAnalysis, \
"Cyclic object state detected by escape analysis") \
V(kFunctionBeingDebugged, "Function is being debugged") \
V(kGraphBuildingFailed, "Optimized graph construction failed") \
V(kLiveEdit, "LiveEdit") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kNotEnoughVirtualRegistersRegalloc, \
"Not enough virtual registers (regalloc)") \
V(kOptimizationDisabled, "Optimization disabled") \
V(kOptimizationDisabledForTest, "Optimization disabled for test")

#define ERROR_MESSAGES_CONSTANTS(C, T) C,
enum BailoutReason {
ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
enum class BailoutReason {
BAILOUT_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
};

enum class AbortReason {
ABORT_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
};
#undef ERROR_MESSAGES_CONSTANTS

const char* GetBailoutReason(BailoutReason reason);
const char* GetAbortReason(AbortReason reason);

} // namespace internal
} // namespace v8
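Since both lists now feed scoped enums (enum class), call sites must qualify constants, and a bailout constant can no longer be passed where an abort constant is expected or silently converted to int. A tiny standalone illustration of what the enum class split buys (not V8 code):

  enum class Bailout { kNoReason, kLastErrorMessage };
  enum class Abort { kNoReason, kLastErrorMessage };

  const char* Describe(Bailout reason) {
    return reason == Bailout::kNoReason ? "no reason" : "other";
  }

  int main() {
    Describe(Bailout::kNoReason);   // OK
    // Describe(Abort::kNoReason);  // compile error: the two lists no longer mix
    // int i = Bailout::kNoReason;  // compile error: no implicit int conversion
  }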
1
deps/v8/src/base/DEPS
vendored
@ -1,6 +1,7 @@
include_rules = [
"-include",
"+include/v8config.h",
"+include/v8-platform.h",
"-src",
"+src/base",
]
20
deps/v8/src/base/cpu.cc
vendored
@ -356,12 +356,12 @@ CPU::CPU()
// Interpret CPU feature information.
if (num_ids > 0) {
__cpuid(cpu_info, 1);
stepping_ = cpu_info[0] & 0xf;
model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
family_ = (cpu_info[0] >> 8) & 0xf;
stepping_ = cpu_info[0] & 0xF;
model_ = ((cpu_info[0] >> 4) & 0xF) + ((cpu_info[0] >> 12) & 0xF0);
family_ = (cpu_info[0] >> 8) & 0xF;
type_ = (cpu_info[0] >> 12) & 0x3;
ext_model_ = (cpu_info[0] >> 16) & 0xf;
ext_family_ = (cpu_info[0] >> 20) & 0xff;
ext_model_ = (cpu_info[0] >> 16) & 0xF;
ext_family_ = (cpu_info[0] >> 20) & 0xFF;
has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
@ -378,16 +378,16 @@ CPU::CPU()

if (family_ == 0x6) {
switch (model_) {
case 0x1c: // SLT
case 0x1C: // SLT
case 0x26:
case 0x36:
case 0x27:
case 0x35:
case 0x37: // SLM
case 0x4a:
case 0x4d:
case 0x4c: // AMT
case 0x6e:
case 0x4A:
case 0x4D:
case 0x4C: // AMT
case 0x6E:
is_atom_ = true;
}
}
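The masks above follow Intel's CPUID leaf-1 EAX layout: stepping in bits 3:0, model in 7:4, family in 11:8, extended model in 19:16, extended family in 27:20. A worked example with a sample signature (the EAX value is illustrative):

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint32_t eax = 0x000306A9;  // sample CPUID.1:EAX signature
    uint32_t stepping = eax & 0xF;                               // 9
    uint32_t model = ((eax >> 4) & 0xF) + ((eax >> 12) & 0xF0);  // 0xA + 0x30 = 0x3A
    uint32_t family = (eax >> 8) & 0xF;                          // 6
    std::printf("family %u model 0x%X stepping %u\n", family, model, stepping);
  }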
8
deps/v8/src/base/debug/stack_trace_posix.cc
vendored
@ -400,7 +400,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
if (n > sz) return nullptr;

if (base < 2 || base > 16) {
buf[0] = '\000';
buf[0] = '\0';
return nullptr;
}

@ -415,7 +415,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {

// Make sure we can write the '-' character.
if (++n > sz) {
buf[0] = '\000';
buf[0] = '\0';
return nullptr;
}
*start++ = '-';
@ -427,7 +427,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
do {
// Make sure there is still enough space left in our output buffer.
if (++n > sz) {
buf[0] = '\000';
buf[0] = '\0';
return nullptr;
}

@ -439,7 +439,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding) {
} while (j > 0 || padding > 0);

// Terminate the output with a NUL character.
*ptr = '\000';

// Conversion to ASCII actually resulted in the digits being in reverse
// order. We can't easily generate them in forward order, as we can't tell
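itoa_r is the async-signal-safe integer formatter used by the stack-trace code: no allocation, an explicit buffer size, and nullptr on overflow or a bad base. A usage sketch against the signature in the hunk (the declaration is repeated here for illustration only; it will not link outside this file):

  #include <cstdint>
  #include <cstdio>

  // Signature as shown in the hunk above; defined in stack_trace_posix.cc.
  char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding);

  void PrintSignalSafe(intptr_t value) {
    char buf[32];
    // Base 10, no left padding; the buffer is NUL-terminated on success.
    if (itoa_r(value, buf, sizeof(buf), 10, 0) != nullptr) {
      std::fputs(buf, stderr);
    }
  }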
8
deps/v8/src/base/functional.cc
vendored
@ -69,8 +69,8 @@ V8_INLINE size_t hash_value_unsigned(T v) {
// This code was taken from MurmurHash.
size_t hash_combine(size_t seed, size_t value) {
#if V8_HOST_ARCH_32_BIT
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
const uint32_t c1 = 0xCC9E2D51;
const uint32_t c2 = 0x1B873593;

value *= c1;
value = bits::RotateRight32(value, 15);
@ -78,9 +78,9 @@ size_t hash_combine(size_t seed, size_t value) {

seed ^= value;
seed = bits::RotateRight32(seed, 13);
seed = seed * 5 + 0xe6546b64;
seed = seed * 5 + 0xE6546B64;
#else
const uint64_t m = V8_UINT64_C(0xc6a4a7935bd1e995);
const uint64_t m = uint64_t{0xC6A4A7935BD1E995};
const uint32_t r = 47;

value *= m;
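hash_combine folds a value into a running seed with one round of MurmurHash mixing; the hunk shows the recased constants and the start of the 32-bit branch. A standalone restatement for illustration (the `value *= c2` step falls outside the hunk and is assumed from the standard MurmurHash3 round, so details may differ from V8's exact routine):

  #include <cstdint>
  #include <cstdio>

  static inline uint32_t RotateRight32(uint32_t v, int shift) {
    return (v >> shift) | (v << (32 - shift));
  }

  uint32_t HashCombine32(uint32_t seed, uint32_t value) {
    const uint32_t c1 = 0xCC9E2D51;
    const uint32_t c2 = 0x1B873593;
    value *= c1;
    value = RotateRight32(value, 15);
    value *= c2;  // assumed: standard MurmurHash3 step, not visible above
    seed ^= value;
    seed = RotateRight32(seed, 13);
    return seed * 5 + 0xE6546B64;
  }

  int main() { std::printf("%08X\n", HashCombine32(0, 42)); }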
282
deps/v8/src/base/ieee754.cc
vendored
@ -225,16 +225,16 @@ int32_t __ieee754_rem_pio2(double x, double *y) {

z = 0;
GET_HIGH_WORD(hx, x); /* high word of x */
ix = hx & 0x7fffffff;
if (ix <= 0x3fe921fb) { /* |x| ~<= pi/4 , no need for reduction */
ix = hx & 0x7FFFFFFF;
if (ix <= 0x3FE921FB) { /* |x| ~<= pi/4 , no need for reduction */
y[0] = x;
y[1] = 0;
return 0;
}
if (ix < 0x4002d97c) { /* |x| < 3pi/4, special case with n=+-1 */
if (ix < 0x4002D97C) { /* |x| < 3pi/4, special case with n=+-1 */
if (hx > 0) {
z = x - pio2_1;
if (ix != 0x3ff921fb) { /* 33+53 bit pi is good enough */
if (ix != 0x3FF921FB) { /* 33+53 bit pi is good enough */
y[0] = z - pio2_1t;
y[1] = (z - y[0]) - pio2_1t;
} else { /* near pi/2, use 33+33+53 bit pi */
@ -245,7 +245,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
return 1;
} else { /* negative x */
z = x + pio2_1;
if (ix != 0x3ff921fb) { /* 33+53 bit pi is good enough */
if (ix != 0x3FF921FB) { /* 33+53 bit pi is good enough */
y[0] = z + pio2_1t;
y[1] = (z - y[0]) + pio2_1t;
} else { /* near pi/2, use 33+33+53 bit pi */
@ -256,7 +256,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
return -1;
}
}
if (ix <= 0x413921fb) { /* |x| ~<= 2^19*(pi/2), medium size */
if (ix <= 0x413921FB) { /* |x| ~<= 2^19*(pi/2), medium size */
t = fabs(x);
n = static_cast<int32_t>(t * invpio2 + half);
fn = static_cast<double>(n);
@ -269,7 +269,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
j = ix >> 20;
y[0] = r - w;
GET_HIGH_WORD(high, y[0]);
i = j - ((high >> 20) & 0x7ff);
i = j - ((high >> 20) & 0x7FF);
if (i > 16) { /* 2nd iteration needed, good to 118 */
t = r;
w = fn * pio2_2;
@ -277,7 +277,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
w = fn * pio2_2t - ((t - r) - w);
y[0] = r - w;
GET_HIGH_WORD(high, y[0]);
i = j - ((high >> 20) & 0x7ff);
i = j - ((high >> 20) & 0x7FF);
if (i > 49) { /* 3rd iteration need, 151 bits acc */
t = r; /* will cover all possible cases */
w = fn * pio2_3;
@ -299,7 +299,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
/*
* all other (large) arguments
*/
if (ix >= 0x7ff00000) { /* x is inf or NaN */
if (ix >= 0x7FF00000) { /* x is inf or NaN */
y[0] = y[1] = x - x;
return 0;
}
@ -331,7 +331,7 @@ int32_t __ieee754_rem_pio2(double x, double *y) {
*
* Algorithm
* 1. Since cos(-x) = cos(x), we need only to consider positive x.
* 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0.
* 2. if x < 2^-27 (hx<0x3E400000 0), return 1 with inexact if x!=0.
* 3. cos(x) is approximated by a polynomial of degree 14 on
* [0,pi/4]
* 4 14
@ -370,8 +370,8 @@ V8_INLINE double __kernel_cos(double x, double y) {
double a, iz, z, r, qx;
int32_t ix;
GET_HIGH_WORD(ix, x);
ix &= 0x7fffffff; /* ix = |x|'s high word*/
if (ix < 0x3e400000) { /* if x < 2**27 */
ix &= 0x7FFFFFFF; /* ix = |x|'s high word*/
if (ix < 0x3E400000) { /* if x < 2**27 */
if (static_cast<int>(x) == 0) return one; /* generate inexact */
}
z = x * x;
@ -379,7 +379,7 @@ V8_INLINE double __kernel_cos(double x, double y) {
if (ix < 0x3FD33333) { /* if |x| < 0.3 */
return one - (0.5 * z - (z * r - x * y));
} else {
if (ix > 0x3fe90000) { /* x > 0.78125 */
if (ix > 0x3FE90000) { /* x > 0.78125 */
qx = 0.28125;
} else {
INSERT_WORDS(qx, ix - 0x00200000, 0); /* x/4 */
@ -585,16 +585,16 @@ recompute:
iq[i] = 0x1000000 - j;
}
} else {
iq[i] = 0xffffff - j;
iq[i] = 0xFFFFFF - j;
}
}
if (q0 > 0) { /* rare case: chance is 1 in 12 */
switch (q0) {
case 1:
iq[jz - 1] &= 0x7fffff;
iq[jz - 1] &= 0x7FFFFF;
break;
case 2:
iq[jz - 1] &= 0x3fffff;
iq[jz - 1] &= 0x3FFFFF;
break;
}
}
@ -706,7 +706,7 @@ recompute:
*
* Algorithm
* 1. Since sin(-x) = -sin(x), we need only to consider positive x.
* 2. if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0.
* 2. if x < 2^-27 (hx<0x3E400000 0), return x with inexact if x!=0.
* 3. sin(x) is approximated by a polynomial of degree 13 on
* [0,pi/4]
* 3 13
@ -738,8 +738,8 @@ V8_INLINE double __kernel_sin(double x, double y, int iy) {
double z, r, v;
int32_t ix;
GET_HIGH_WORD(ix, x);
ix &= 0x7fffffff; /* high word of x */
if (ix < 0x3e400000) { /* |x| < 2**-27 */
ix &= 0x7FFFFFFF; /* high word of x */
if (ix < 0x3E400000) { /* |x| < 2**-27 */
if (static_cast<int>(x) == 0) return x;
} /* generate inexact */
z = x * x;
@ -761,7 +761,7 @@ V8_INLINE double __kernel_sin(double x, double y, int iy) {
*
* Algorithm
* 1. Since tan(-x) = -tan(x), we need only to consider positive x.
* 2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
* 2. if x < 2^-28 (hx<0x3E300000 0), return x with inexact if x!=0.
* 3. tan(x) is approximated by a odd polynomial of degree 27 on
* [0,0.67434]
* 3 27
@ -813,8 +813,8 @@ double __kernel_tan(double x, double y, int iy) {
int32_t ix, hx;

GET_HIGH_WORD(hx, x); /* high word of x */
ix = hx & 0x7fffffff; /* high word of |x| */
if (ix < 0x3e300000) { /* x < 2**-28 */
ix = hx & 0x7FFFFFFF; /* high word of |x| */
if (ix < 0x3E300000) { /* x < 2**-28 */
if (static_cast<int>(x) == 0) { /* generate inexact */
uint32_t low;
GET_LOW_WORD(low, x);
@ -934,11 +934,11 @@ double acos(double x) {
double z, p, q, r, w, s, c, df;
int32_t hx, ix;
GET_HIGH_WORD(hx, x);
ix = hx & 0x7fffffff;
if (ix >= 0x3ff00000) { /* |x| >= 1 */
ix = hx & 0x7FFFFFFF;
if (ix >= 0x3FF00000) { /* |x| >= 1 */
uint32_t lx;
GET_LOW_WORD(lx, x);
if (((ix - 0x3ff00000) | lx) == 0) { /* |x|==1 */
if (((ix - 0x3FF00000) | lx) == 0) { /* |x|==1 */
if (hx > 0)
return 0.0; /* acos(1) = 0 */
else
@ -946,8 +946,8 @@ double acos(double x) {
}
return (x - x) / (x - x); /* acos(|x|>1) is NaN */
}
if (ix < 0x3fe00000) { /* |x| < 0.5 */
if (ix <= 0x3c600000) return pio2_hi + pio2_lo; /*if|x|<2**-57*/
if (ix < 0x3FE00000) { /* |x| < 0.5 */
if (ix <= 0x3C600000) return pio2_hi + pio2_lo; /*if|x|<2**-57*/
z = x * x;
p = z * (pS0 + z * (pS1 + z * (pS2 + z * (pS3 + z * (pS4 + z * pS5)))));
q = one + z * (qS1 + z * (qS2 + z * (qS3 + z * qS4)));
@ -996,15 +996,15 @@ double acosh(double x) {
int32_t hx;
uint32_t lx;
EXTRACT_WORDS(hx, lx, x);
if (hx < 0x3ff00000) { /* x < 1 */
if (hx < 0x3FF00000) { /* x < 1 */
return (x - x) / (x - x);
} else if (hx >= 0x41b00000) { /* x > 2**28 */
if (hx >= 0x7ff00000) { /* x is inf of NaN */
} else if (hx >= 0x41B00000) { /* x > 2**28 */
if (hx >= 0x7FF00000) { /* x is inf of NaN */
return x + x;
} else {
return log(x) + ln2; /* acosh(huge)=log(2x) */
}
} else if (((hx - 0x3ff00000) | lx) == 0) {
} else if (((hx - 0x3FF00000) | lx) == 0) {
return 0.0; /* acosh(1) = 0 */
} else if (hx > 0x40000000) { /* 2**28 > x > 2 */
t = x * x;
@ -1067,15 +1067,15 @@ double asin(double x) {

t = 0;
GET_HIGH_WORD(hx, x);
ix = hx & 0x7fffffff;
if (ix >= 0x3ff00000) { /* |x|>= 1 */
ix = hx & 0x7FFFFFFF;
if (ix >= 0x3FF00000) { /* |x|>= 1 */
uint32_t lx;
GET_LOW_WORD(lx, x);
if (((ix - 0x3ff00000) | lx) == 0) /* asin(1)=+-pi/2 with inexact */
if (((ix - 0x3FF00000) | lx) == 0) /* asin(1)=+-pi/2 with inexact */
return x * pio2_hi + x * pio2_lo;
return (x - x) / (x - x); /* asin(|x|>1) is NaN */
} else if (ix < 0x3fe00000) { /* |x|<0.5 */
if (ix < 0x3e400000) { /* if |x| < 2**-27 */
} else if (ix < 0x3FE00000) { /* |x|<0.5 */
if (ix < 0x3E400000) { /* if |x| < 2**-27 */
if (huge + x > one) return x; /* return x with inexact if x!=0*/
} else {
t = x * x;
@ -1127,12 +1127,12 @@ double asinh(double x) {
double t, w;
int32_t hx, ix;
GET_HIGH_WORD(hx, x);
ix = hx & 0x7fffffff;
if (ix >= 0x7ff00000) return x + x; /* x is inf or NaN */
if (ix < 0x3e300000) { /* |x|<2**-28 */
ix = hx & 0x7FFFFFFF;
if (ix >= 0x7FF00000) return x + x; /* x is inf or NaN */
if (ix < 0x3E300000) { /* |x|<2**-28 */
if (huge + x > one) return x; /* return x inexact except 0 */
}
if (ix > 0x41b00000) { /* |x| > 2**28 */
if (ix > 0x41B00000) { /* |x| > 2**28 */
w = log(fabs(x)) + ln2;
} else if (ix > 0x40000000) { /* 2**28 > |x| > 2.0 */
t = fabs(x);
@ -1202,26 +1202,26 @@ double atan(double x) {
int32_t ix, hx, id;

GET_HIGH_WORD(hx, x);
ix = hx & 0x7fffffff;
ix = hx & 0x7FFFFFFF;
if (ix >= 0x44100000) { /* if |x| >= 2^66 */
uint32_t low;
GET_LOW_WORD(low, x);
if (ix > 0x7ff00000 || (ix == 0x7ff00000 && (low != 0)))
if (ix > 0x7FF00000 || (ix == 0x7FF00000 && (low != 0)))
return x + x; /* NaN */
if (hx > 0)
return atanhi[3] + *(volatile double *)&atanlo[3];
else
return -atanhi[3] - *(volatile double *)&atanlo[3];
}
if (ix < 0x3fdc0000) { /* |x| < 0.4375 */
if (ix < 0x3e400000) { /* |x| < 2^-27 */
if (ix < 0x3FDC0000) { /* |x| < 0.4375 */
if (ix < 0x3E400000) { /* |x| < 2^-27 */
if (huge + x > one) return x; /* raise inexact */
}
id = -1;
} else {
x = fabs(x);
if (ix < 0x3ff30000) { /* |x| < 1.1875 */
if (ix < 0x3fe60000) { /* 7/16 <=|x|<11/16 */
if (ix < 0x3FF30000) { /* |x| < 1.1875 */
if (ix < 0x3FE60000) { /* 7/16 <=|x|<11/16 */
id = 0;
x = (2.0 * x - one) / (2.0 + x);
} else { /* 11/16<=|x|< 19/16 */
@ -1294,14 +1294,14 @@ double atan2(double y, double x) {
uint32_t lx, ly;

EXTRACT_WORDS(hx, lx, x);
ix = hx & 0x7fffffff;
ix = hx & 0x7FFFFFFF;
EXTRACT_WORDS(hy, ly, y);
iy = hy & 0x7fffffff;
if (((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x7ff00000) ||
((iy | ((ly | -static_cast<int32_t>(ly)) >> 31)) > 0x7ff00000)) {
iy = hy & 0x7FFFFFFF;
if (((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x7FF00000) ||
((iy | ((ly | -static_cast<int32_t>(ly)) >> 31)) > 0x7FF00000)) {
return x + y; /* x or y is NaN */
}
if (((hx - 0x3ff00000) | lx) == 0) return atan(y); /* x=1.0 */
if (((hx - 0x3FF00000) | lx) == 0) return atan(y); /* x=1.0 */
m = ((hy >> 31) & 1) | ((hx >> 30) & 2); /* 2*sign(x)+sign(y) */

/* when y = 0 */
@ -1320,8 +1320,8 @@ double atan2(double y, double x) {
if ((ix | lx) == 0) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;

/* when x is INF */
if (ix == 0x7ff00000) {
if (iy == 0x7ff00000) {
if (ix == 0x7FF00000) {
if (iy == 0x7FF00000) {
switch (m) {
case 0:
return pi_o_4 + tiny; /* atan(+INF,+INF) */
@ -1346,7 +1346,7 @@ double atan2(double y, double x) {
}
}
/* when y is INF */
if (iy == 0x7ff00000) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;
if (iy == 0x7FF00000) return (hy < 0) ? -pi_o_2 - tiny : pi_o_2 + tiny;

/* compute y/x */
k = (iy - ix) >> 20;
@ -1408,10 +1408,10 @@ double cos(double x) {
GET_HIGH_WORD(ix, x);

/* |x| ~< pi/4 */
ix &= 0x7fffffff;
if (ix <= 0x3fe921fb) {
ix &= 0x7FFFFFFF;
if (ix <= 0x3FE921FB) {
return __kernel_cos(x, z);
} else if (ix >= 0x7ff00000) {
} else if (ix >= 0x7FF00000) {
/* cos(Inf or NaN) is NaN */
return x - x;
} else {
@ -1497,18 +1497,18 @@ double exp(double x) {
one = 1.0,
halF[2] = {0.5, -0.5},
o_threshold = 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
u_threshold = -7.45133219101941108420e+02, /* 0xc0874910, 0xD52D3051 */
ln2HI[2] = {6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
-6.93147180369123816490e-01}, /* 0xbfe62e42, 0xfee00000 */
ln2LO[2] = {1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
-1.90821492927058770002e-10}, /* 0xbdea39ef, 0x35793c76 */
invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
u_threshold = -7.45133219101941108420e+02, /* 0xC0874910, 0xD52D3051 */
ln2HI[2] = {6.93147180369123816490e-01, /* 0x3FE62E42, 0xFEE00000 */
-6.93147180369123816490e-01}, /* 0xBFE62E42, 0xFEE00000 */
ln2LO[2] = {1.90821492927058770002e-10, /* 0x3DEA39EF, 0x35793C76 */
-1.90821492927058770002e-10}, /* 0xBDEA39EF, 0x35793C76 */
invln2 = 1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE */
P1 = 1.66666666666666019037e-01, /* 0x3FC55555, 0x5555553E */
P2 = -2.77777777770155933842e-03, /* 0xBF66C16C, 0x16BEBD93 */
P3 = 6.61375632143793436117e-05, /* 0x3F11566A, 0xAF25DE2C */
P4 = -1.65339022054652515390e-06, /* 0xBEBBBD41, 0xC5D26BF1 */
P5 = 4.13813679705723846039e-08, /* 0x3E663769, 0x72BEA4D0 */
E = 2.718281828459045; /* 0x4005bf0a, 0x8b145769 */
E = 2.718281828459045; /* 0x4005BF0A, 0x8B145769 */

static volatile double
huge = 1.0e+300,
@ -1521,14 +1521,14 @@ double exp(double x) {

GET_HIGH_WORD(hx, x);
xsb = (hx >> 31) & 1; /* sign bit of x */
hx &= 0x7fffffff; /* high word of |x| */
hx &= 0x7FFFFFFF; /* high word of |x| */

/* filter out non-finite argument */
if (hx >= 0x40862E42) { /* if |x|>=709.78... */
if (hx >= 0x7ff00000) {
if (hx >= 0x7FF00000) {
uint32_t lx;
GET_LOW_WORD(lx, x);
if (((hx & 0xfffff) | lx) != 0)
if (((hx & 0xFFFFF) | lx) != 0)
return x + x; /* NaN */
else
return (xsb == 0) ? x : 0.0; /* exp(+-inf)={inf,0} */
@ -1538,7 +1538,7 @@ double exp(double x) {
}

/* argument reduction */
if (hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
if (hx > 0x3FD62E42) { /* if |x| > 0.5 ln2 */
if (hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
/* TODO(rtoy): We special case exp(1) here to return the correct
* value of E, as the computation below would get the last bit
@ -1555,7 +1555,7 @@ double exp(double x) {
lo = t * ln2LO[0];
}
STRICT_ASSIGN(double, x, hi - lo);
} else if (hx < 0x3e300000) { /* when |x|<2**-28 */
} else if (hx < 0x3E300000) { /* when |x|<2**-28 */
if (huge + x > one) return one + x; /* trigger inexact */
} else {
k = 0;
@ -1564,9 +1564,9 @@ double exp(double x) {
/* x is now in primary range */
t = x * x;
if (k >= -1021) {
INSERT_WORDS(twopk, 0x3ff00000 + (k << 20), 0);
INSERT_WORDS(twopk, 0x3FF00000 + (k << 20), 0);
} else {
INSERT_WORDS(twopk, 0x3ff00000 + ((k + 1000) << 20), 0);
INSERT_WORDS(twopk, 0x3FF00000 + ((k + 1000) << 20), 0);
}
c = x - t * (P1 + t * (P2 + t * (P3 + t * (P4 + t * P5))));
if (k == 0) {
@ -1607,13 +1607,13 @@ double atanh(double x) {
int32_t hx, ix;
uint32_t lx;
EXTRACT_WORDS(hx, lx, x);
ix = hx & 0x7fffffff;
if ((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x3ff00000) /* |x|>1 */
ix = hx & 0x7FFFFFFF;
if ((ix | ((lx | -static_cast<int32_t>(lx)) >> 31)) > 0x3FF00000) /* |x|>1 */
return (x - x) / (x - x);
if (ix == 0x3ff00000) return x / zero;
if (ix < 0x3e300000 && (huge + x) > zero) return x; /* x<2**-28 */
if (ix == 0x3FF00000) return x / zero;
if (ix < 0x3E300000 && (huge + x) > zero) return x; /* x<2**-28 */
SET_HIGH_WORD(x, ix);
if (ix < 0x3fe00000) { /* x < 0.5 */
if (ix < 0x3FE00000) { /* x < 0.5 */
t = x + x;
t = 0.5 * log1p(t + t * x / (one - x));
} else {
@ -1699,21 +1699,21 @@ double log(double x) {

k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
if (((hx & 0x7fffffff) | lx) == 0)
if (((hx & 0x7FFFFFFF) | lx) == 0)
return -two54 / vzero; /* log(+-0)=-inf */
if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD(hx, x);
}
if (hx >= 0x7ff00000) return x + x;
if (hx >= 0x7FF00000) return x + x;
k += (hx >> 20) - 1023;
hx &= 0x000fffff;
i = (hx + 0x95f64) & 0x100000;
SET_HIGH_WORD(x, hx | (i ^ 0x3ff00000)); /* normalize x or x/2 */
hx &= 0x000FFFFF;
i = (hx + 0x95F64) & 0x100000;
SET_HIGH_WORD(x, hx | (i ^ 0x3FF00000)); /* normalize x or x/2 */
k += (i >> 20);
f = x - 1.0;
if ((0x000fffff & (2 + hx)) < 3) { /* -2**-20 <= f < 2**-20 */
if ((0x000FFFFF & (2 + hx)) < 3) { /* -2**-20 <= f < 2**-20 */
if (f == zero) {
if (k == 0) {
return zero;
@ -1733,9 +1733,9 @@ double log(double x) {
s = f / (2.0 + f);
dk = static_cast<double>(k);
z = s * s;
i = hx - 0x6147a;
i = hx - 0x6147A;
w = z * z;
j = 0x6b851 - hx;
j = 0x6B851 - hx;
t1 = w * (Lg2 + w * (Lg4 + w * Lg6));
t2 = z * (Lg1 + w * (Lg3 + w * (Lg5 + w * Lg7)));
i |= j;
@ -1838,30 +1838,30 @@ double log1p(double x) {
int32_t k, hx, hu, ax;

GET_HIGH_WORD(hx, x);
ax = hx & 0x7fffffff;
ax = hx & 0x7FFFFFFF;

k = 1;
if (hx < 0x3FDA827A) { /* 1+x < sqrt(2)+ */
if (ax >= 0x3ff00000) { /* x <= -1.0 */
if (ax >= 0x3FF00000) { /* x <= -1.0 */
if (x == -1.0)
return -two54 / vzero; /* log1p(-1)=+inf */
else
return (x - x) / (x - x); /* log1p(x<-1)=NaN */
}
if (ax < 0x3e200000) { /* |x| < 2**-29 */
if (ax < 0x3E200000) { /* |x| < 2**-29 */
if (two54 + x > zero /* raise inexact */
&& ax < 0x3c900000) /* |x| < 2**-54 */
&& ax < 0x3C900000) /* |x| < 2**-54 */
return x;
else
return x - x * x * 0.5;
}
if (hx > 0 || hx <= static_cast<int32_t>(0xbfd2bec4)) {
if (hx > 0 || hx <= static_cast<int32_t>(0xBFD2BEC4)) {
k = 0;
f = x;
hu = 1;
} /* sqrt(2)/2- <= 1+x < sqrt(2)+ */
}
if (hx >= 0x7ff00000) return x + x;
if (hx >= 0x7FF00000) return x + x;
if (k != 0) {
if (hx < 0x43400000) {
STRICT_ASSIGN(double, u, 1.0 + x);
@ -1875,7 +1875,7 @@ double log1p(double x) {
k = (hu >> 20) - 1023;
c = 0;
}
hu &= 0x000fffff;
hu &= 0x000FFFFF;
/*
* The approximation to sqrt(2) used in thresholds is not
* critical. However, the ones used above must give less
@ -1883,11 +1883,11 @@ double log1p(double x) {
* never reached from here, since here we have committed to
* using the correction term but don't use it if k==0.
*/
if (hu < 0x6a09e) { /* u ~< sqrt(2) */
SET_HIGH_WORD(u, hu | 0x3ff00000); /* normalize u */
if (hu < 0x6A09E) { /* u ~< sqrt(2) */
SET_HIGH_WORD(u, hu | 0x3FF00000); /* normalize u */
} else {
k += 1;
SET_HIGH_WORD(u, hu | 0x3fe00000); /* normalize u/2 */
SET_HIGH_WORD(u, hu | 0x3FE00000); /* normalize u/2 */
hu = (0x00100000 - hu) >> 2;
}
f = u - 1.0;
@ -2012,8 +2012,8 @@ static inline double k_log1p(double f) {
double log2(double x) {
static const double
two54 = 1.80143985094819840000e+16, /* 0x43500000, 0x00000000 */
ivln2hi = 1.44269504072144627571e+00, /* 0x3ff71547, 0x65200000 */
ivln2lo = 1.67517131648865118353e-10; /* 0x3de705fc, 0x2eefa200 */
ivln2hi = 1.44269504072144627571e+00, /* 0x3FF71547, 0x65200000 */
ivln2lo = 1.67517131648865118353e-10; /* 0x3DE705FC, 0x2EEFA200 */

static const double zero = 0.0;
static volatile double vzero = 0.0;
@ -2026,19 +2026,19 @@ double log2(double x) {

k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
if (((hx & 0x7fffffff) | lx) == 0)
if (((hx & 0x7FFFFFFF) | lx) == 0)
return -two54 / vzero; /* log(+-0)=-inf */
if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
k -= 54;
x *= two54; /* subnormal number, scale up x */
GET_HIGH_WORD(hx, x);
}
if (hx >= 0x7ff00000) return x + x;
if (hx == 0x3ff00000 && lx == 0) return zero; /* log(1) = +0 */
if (hx >= 0x7FF00000) return x + x;
if (hx == 0x3FF00000 && lx == 0) return zero; /* log(1) = +0 */
k += (hx >> 20) - 1023;
hx &= 0x000fffff;
i = (hx + 0x95f64) & 0x100000;
SET_HIGH_WORD(x, hx | (i ^ 0x3ff00000)); /* normalize x or x/2 */
hx &= 0x000FFFFF;
i = (hx + 0x95F64) & 0x100000;
SET_HIGH_WORD(x, hx | (i ^ 0x3FF00000)); /* normalize x or x/2 */
k += (i >> 20);
y = static_cast<double>(k);
f = x - 1.0;
@ -2133,7 +2133,7 @@ double log10(double x) {

k = 0;
if (hx < 0x00100000) { /* x < 2**-1022 */
if (((hx & 0x7fffffff) | lx) == 0)
if (((hx & 0x7FFFFFFF) | lx) == 0)
return -two54 / vzero; /* log(+-0)=-inf */
if (hx < 0) return (x - x) / zero; /* log(-#) = NaN */
k -= 54;
@ -2141,12 +2141,12 @@ double log10(double x) {
GET_HIGH_WORD(hx, x);
GET_LOW_WORD(lx, x);
}
if (hx >= 0x7ff00000) return x + x;
if (hx == 0x3ff00000 && lx == 0) return zero; /* log(1) = +0 */
if (hx >= 0x7FF00000) return x + x;
if (hx == 0x3FF00000 && lx == 0) return zero; /* log(1) = +0 */
k += (hx >> 20) - 1023;

i = (k & 0x80000000) >> 31;
hx = (hx & 0x000fffff) | ((0x3ff - i) << 20);
hx = (hx & 0x000FFFFF) | ((0x3FF - i) << 20);
y = k + i;
SET_HIGH_WORD(x, hx);
SET_LOW_WORD(x, lx);
@ -2254,9 +2254,9 @@ double expm1(double x) {
one = 1.0,
tiny = 1.0e-300,
o_threshold = 7.09782712893383973096e+02, /* 0x40862E42, 0xFEFA39EF */
ln2_hi = 6.93147180369123816490e-01, /* 0x3fe62e42, 0xfee00000 */
ln2_lo = 1.90821492927058770002e-10, /* 0x3dea39ef, 0x35793c76 */
invln2 = 1.44269504088896338700e+00, /* 0x3ff71547, 0x652b82fe */
ln2_hi = 6.93147180369123816490e-01, /* 0x3FE62E42, 0xFEE00000 */
ln2_lo = 1.90821492927058770002e-10, /* 0x3DEA39EF, 0x35793C76 */
invln2 = 1.44269504088896338700e+00, /* 0x3FF71547, 0x652B82FE */
/* Scaled Q's: Qn_here = 2**n * Qn_above, for R(2*z) where z = hxs =
x*x/2: */
Q1 = -3.33333333333331316428e-02, /* BFA11111 111110F4 */
@ -2273,15 +2273,15 @@ double expm1(double x) {

GET_HIGH_WORD(hx, x);
xsb = hx & 0x80000000; /* sign bit of x */
hx &= 0x7fffffff; /* high word of |x| */
hx &= 0x7FFFFFFF; /* high word of |x| */

/* filter out huge and non-finite argument */
if (hx >= 0x4043687A) { /* if |x|>=56*ln2 */
if (hx >= 0x40862E42) { /* if |x|>=709.78... */
if (hx >= 0x7ff00000) {
if (hx >= 0x7FF00000) {
uint32_t low;
GET_LOW_WORD(low, x);
if (((hx & 0xfffff) | low) != 0)
if (((hx & 0xFFFFF) | low) != 0)
return x + x; /* NaN */
else
return (xsb == 0) ? x : -1.0; /* exp(+-inf)={inf,-1} */
@ -2295,7 +2295,7 @@ double expm1(double x) {
}

/* argument reduction */
if (hx > 0x3fd62e42) { /* if |x| > 0.5 ln2 */
if (hx > 0x3FD62E42) { /* if |x| > 0.5 ln2 */
if (hx < 0x3FF0A2B2) { /* and |x| < 1.5 ln2 */
if (xsb == 0) {
hi = x - ln2_hi;
@ -2314,7 +2314,7 @@ double expm1(double x) {
}
STRICT_ASSIGN(double, x, hi - lo);
c = (hi - x) - lo;
} else if (hx < 0x3c900000) { /* when |x|<2**-54, return x */
} else if (hx < 0x3C900000) { /* when |x|<2**-54, return x */
t = huge + x; /* return x with inexact flags when x!=0 */
return x - (t - (huge + x));
} else {
@ -2330,7 +2330,7 @@ double expm1(double x) {
if (k == 0) {
return x - (x * e - hxs); /* c is 0 */
} else {
INSERT_WORDS(twopk, 0x3ff00000 + (k << 20), 0); /* 2^k */
INSERT_WORDS(twopk, 0x3FF00000 + (k << 20), 0); /* 2^k */
e = (x * (e - c) - c);
e -= hxs;
if (k == -1) return 0.5 * (x - e) - 0.5;
@ -2353,11 +2353,11 @@ double expm1(double x) {
}
t = one;
if (k < 20) {
SET_HIGH_WORD(t, 0x3ff00000 - (0x200000 >> k)); /* t=1-2^-k */
SET_HIGH_WORD(t, 0x3FF00000 - (0x200000 >> k)); /* t=1-2^-k */
y = t - (e - x);
y = y * twopk;
} else {
SET_HIGH_WORD(t, ((0x3ff - k) << 20)); /* 2^-k */
SET_HIGH_WORD(t, ((0x3FF - k) << 20)); /* 2^-k */
y = x - (e + t);
y += one;
y = y * twopk;
@ -2372,11 +2372,11 @@ double cbrt(double x) {
B2 = 696219795; /* B2 = (1023-1023/3-54/3-0.03306235651)*2**20 */

/* |1/cbrt(x) - p(x)| < 2**-23.5 (~[-7.93e-8, 7.929e-8]). */
static const double P0 = 1.87595182427177009643, /* 0x3ffe03e6, 0x0f61e692 */
P1 = -1.88497979543377169875, /* 0xbffe28e0, 0x92f02420 */
P2 = 1.621429720105354466140, /* 0x3ff9f160, 0x4a49d6c2 */
P3 = -0.758397934778766047437, /* 0xbfe844cb, 0xbee751d9 */
P4 = 0.145996192886612446982; /* 0x3fc2b000, 0xd4e4edd7 */
static const double P0 = 1.87595182427177009643, /* 0x3FFE03E6, 0x0F61E692 */
P1 = -1.88497979543377169875, /* 0xBFFE28E0, 0x92F02420 */
P2 = 1.621429720105354466140, /* 0x3FF9F160, 0x4A49D6C2 */
P3 = -0.758397934778766047437, /* 0xBFE844CB, 0xBEE751D9 */
P4 = 0.145996192886612446982; /* 0x3FC2B000, 0xD4E4EDD7 */

int32_t hx;
union {
@ -2390,7 +2390,7 @@ double cbrt(double x) {
EXTRACT_WORDS(hx, low, x);
sign = hx & 0x80000000; /* sign= sign(x) */
hx ^= sign;
if (hx >= 0x7ff00000) return (x + x); /* cbrt(NaN,INF) is itself */
if (hx >= 0x7FF00000) return (x + x); /* cbrt(NaN,INF) is itself */

/*
* Rough cbrt to 5 bits:
@ -2412,7 +2412,7 @@ double cbrt(double x) {
SET_HIGH_WORD(t, 0x43500000); /* set t= 2**54 */
t *= x;
GET_HIGH_WORD(high, t);
INSERT_WORDS(t, sign | ((high & 0x7fffffff) / 3 + B2), 0);
INSERT_WORDS(t, sign | ((high & 0x7FFFFFFF) / 3 + B2), 0);
} else {
INSERT_WORDS(t, sign | (hx / 3 + B1), 0);
}
@ -2441,7 +2441,7 @@ double cbrt(double x) {
* before the final error is larger than 0.667 ulps.
*/
u.value = t;
u.bits = (u.bits + 0x80000000) & 0xffffffffc0000000ULL;
u.bits = (u.bits + 0x80000000) & 0xFFFFFFFFC0000000ULL;
t = u.value;

/* one step Newton iteration to 53 bits with error < 0.667 ulps */
@ -2492,10 +2492,10 @@ double sin(double x) {
GET_HIGH_WORD(ix, x);

/* |x| ~< pi/4 */
ix &= 0x7fffffff;
if (ix <= 0x3fe921fb) {
ix &= 0x7FFFFFFF;
if (ix <= 0x3FE921FB) {
return __kernel_sin(x, z, 0);
} else if (ix >= 0x7ff00000) {
} else if (ix >= 0x7FF00000) {
/* sin(Inf or NaN) is NaN */
return x - x;
} else {
@ -2551,10 +2551,10 @@ double tan(double x) {
GET_HIGH_WORD(ix, x);

/* |x| ~< pi/4 */
ix &= 0x7fffffff;
if (ix <= 0x3fe921fb) {
ix &= 0x7FFFFFFF;
if (ix <= 0x3FE921FB) {
return __kernel_tan(x, z, 1);
} else if (ix >= 0x7ff00000) {
} else if (ix >= 0x7FF00000) {
/* tan(Inf or NaN) is NaN */
return x - x; /* NaN */
} else {
@ -2596,14 +2596,14 @@ double cosh(double x) {

/* High word of |x|. */
GET_HIGH_WORD(ix, x);
ix &= 0x7fffffff;
ix &= 0x7FFFFFFF;

// |x| in [0,0.5*log2], return 1+expm1(|x|)^2/(2*exp(|x|))
if (ix < 0x3fd62e43) {
if (ix < 0x3FD62E43) {
double t = expm1(fabs(x));
double w = one + t;
// For |x| < 2^-55, cosh(x) = 1
if (ix < 0x3c800000) return w;
if (ix < 0x3C800000) return w;
return one + (t * t) / (w + w);
}

@ -2614,7 +2614,7 @@ double cosh(double x) {
}

// |x| in [22, log(maxdouble)], return half*exp(|x|)
if (ix < 0x40862e42) return half * exp(fabs(x));
if (ix < 0x40862E42) return half * exp(fabs(x));

// |x| in [log(maxdouble), overflowthreshold]
if (fabs(x) <= KCOSH_OVERFLOW) {
@ -2624,7 +2624,7 @@ double cosh(double x) {
}

/* x is INF or NaN */
if (ix >= 0x7ff00000) return x * x;
if (ix >= 0x7FF00000) return x * x;

// |x| > overflowthreshold.
return huge * huge;
@ -2653,7 +2653,7 @@ double sinh(double x) {
static const double KSINH_OVERFLOW = 710.4758600739439,
TWO_M28 =
3.725290298461914e-9, // 2^-28, empty lower half
LOG_MAXD = 709.7822265625; // 0x40862e42 00000000, empty lower half
LOG_MAXD = 709.7822265625; // 0x40862E42 00000000, empty lower half
static const double shuge = 1.0e307;

double h = (x < 0) ? -0.5 : 0.5;
@ -2712,10 +2712,10 @@ double tanh(double x) {
int32_t jx, ix;

GET_HIGH_WORD(jx, x);
ix = jx & 0x7fffffff;
ix = jx & 0x7FFFFFFF;

/* x is INF or NaN */
if (ix >= 0x7ff00000) {
if (ix >= 0x7FF00000) {
if (jx >= 0)
return one / x + one; /* tanh(+-inf)=+-1 */
else
@ -2724,10 +2724,10 @@ double tanh(double x) {

/* |x| < 22 */
if (ix < 0x40360000) { /* |x|<22 */
if (ix < 0x3e300000) { /* |x|<2**-28 */
if (ix < 0x3E300000) { /* |x|<2**-28 */
if (huge + x > one) return x; /* tanh(tiny) = tiny with inexact */
}
if (ix >= 0x3ff00000) { /* |x|>=1 */
if (ix >= 0x3FF00000) { /* |x|>=1 */
t = expm1(two * fabs(x));
z = one - two / (t + two);
} else {
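Every hex threshold above tests the high 32 bits of an IEEE-754 double: bit 31 is the sign, bits 30..20 the exponent (after masking the sign, values at or above 0x7FF00000 mean Inf/NaN), bits 19..0 the top of the mantissa. A portable stand-in for the GET_HIGH_WORD macro, for illustration:

  #include <cstdint>
  #include <cstdio>
  #include <cstring>
  #include <limits>

  static inline uint32_t HighWord(double x) {
    uint64_t bits;
    std::memcpy(&bits, &x, sizeof(bits));  // defined-behavior type pun
    return static_cast<uint32_t>(bits >> 32);
  }

  int main() {
    std::printf("%08X\n", HighWord(1.0) & 0x7FFFFFFF);  // 3FF00000: exponent of 1.0
    double inf = std::numeric_limits<double>::infinity();
    std::printf("%d\n", (HighWord(inf) & 0x7FFFFFFF) >= 0x7FF00000);  // 1: Inf/NaN test
  }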
12
deps/v8/src/base/lazy-instance.h
vendored
@ -168,17 +168,13 @@ struct LazyInstanceImpl {
typedef typename AllocationTrait::StorageType StorageType;

private:
static void InitInstance(StorageType* storage) {
AllocationTrait::template InitStorageUsingTrait<CreateTrait>(storage);
static void InitInstance(void* storage) {
AllocationTrait::template InitStorageUsingTrait<CreateTrait>(
static_cast<StorageType*>(storage));
}

void Init() const {
InitOnceTrait::Init(
&once_,
// Casts to void* are needed here to avoid breaking strict aliasing
// rules.
reinterpret_cast<void(*)(void*)>(&InitInstance), // NOLINT
reinterpret_cast<void*>(&storage_));
InitOnceTrait::Init(&once_, &InitInstance, static_cast<void*>(&storage_));
}

public:
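Taking void* in InitInstance removes the function-pointer reinterpret_cast that Init() previously needed. The usage pattern is unchanged and appears later in this commit (platform-posix.cc); a sketch with a hypothetical payload type:

  #include "src/base/lazy-instance.h"

  struct Counters { int hits = 0; };  // hypothetical payload

  static v8::base::LazyInstance<Counters>::type shared_counters =
      LAZY_INSTANCE_INITIALIZER;

  Counters* GetCounters() {
    // First call constructs the instance through the once machinery;
    // later calls just return the pointer.
    return shared_counters.Pointer();
  }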
2
deps/v8/src/base/logging.cc
vendored
@ -119,8 +119,6 @@ DEFINE_CHECK_OP_IMPL(GT)
} // namespace base
} // namespace v8


// Contains protection against recursive calls (faults while handling faults).
void V8_Fatal(const char* file, int line, const char* format, ...) {
fflush(stdout);
fflush(stderr);
18
deps/v8/src/base/logging.h
vendored
@ -20,23 +20,13 @@
V8_BASE_EXPORT V8_NOINLINE void V8_Dcheck(const char* file, int line,
const char* message);

// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
// development, but they should not be relied on in the final product.
#ifdef DEBUG
#define FATAL(msg) \
V8_Fatal(__FILE__, __LINE__, "%s", (msg))
#define UNIMPLEMENTED() \
V8_Fatal(__FILE__, __LINE__, "unimplemented code")
#define UNREACHABLE() \
V8_Fatal(__FILE__, __LINE__, "unreachable code")
#define FATAL(...) V8_Fatal(__FILE__, __LINE__, __VA_ARGS__)
#else
#define FATAL(msg) \
V8_Fatal("", 0, "%s", (msg))
#define UNIMPLEMENTED() \
V8_Fatal("", 0, "unimplemented code")
#define UNREACHABLE() V8_Fatal("", 0, "unreachable code")
#define FATAL(...) V8_Fatal("", 0, __VA_ARGS__)
#endif

#define UNIMPLEMENTED() FATAL("unimplemented code")
#define UNREACHABLE() FATAL("unreachable code")

namespace v8 {
namespace base {
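After this hunk, FATAL accepts a printf-style format in both build modes, and UNIMPLEMENTED/UNREACHABLE become single definitions layered on top of it. A sketch of a call site (the function and values are illustrative):

  #include <cstdlib>

  #include "src/base/logging.h"

  void* AllocateOrDie(size_t length) {
    void* result = std::malloc(length);
    if (result == nullptr) {
      // Arguments now forward straight to V8_Fatal; previously FATAL took
      // one preformatted string.
      FATAL("Cannot allocate %zu bytes", length);
    }
    return result;
  }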
57
deps/v8/src/base/macros.h
vendored
@ -5,6 +5,8 @@
#ifndef V8_BASE_MACROS_H_
#define V8_BASE_MACROS_H_

#include <limits>

#include "src/base/compiler-specific.h"
#include "src/base/format-macros.h"
#include "src/base/logging.h"
@ -167,17 +169,22 @@ V8_INLINE Dest bit_cast(Source const& source) {
#define DISABLE_ASAN
#endif

// DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons.
#if !defined(DISABLE_CFI_PERF)
// Helper macro to define no_sanitize attributes only with clang.
#if defined(__clang__) && defined(__has_attribute)
#if __has_attribute(no_sanitize)
#define DISABLE_CFI_PERF __attribute__((no_sanitize("cfi")))
#define CLANG_NO_SANITIZE(what) __attribute__((no_sanitize(what)))
#endif
#endif
#if !defined(CLANG_NO_SANITIZE)
#define CLANG_NO_SANITIZE(what)
#endif
#if !defined(DISABLE_CFI_PERF)
#define DISABLE_CFI_PERF
#endif

// DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons.
#define DISABLE_CFI_PERF CLANG_NO_SANITIZE("cfi")

// DISABLE_CFI_ICALL -- Disable Control Flow Integrity indirect call checks,
// useful because calls into JITed code can not be CFI verified.
#define DISABLE_CFI_ICALL CLANG_NO_SANITIZE("cfi-icall")

#if V8_CC_GNU
#define V8_IMMEDIATE_CRASH() __builtin_trap()
@ -214,34 +221,16 @@ struct Use {
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
#if V8_CC_MSVC
# define V8_UINT64_C(x) (x ## UI64)
# define V8_INT64_C(x) (x ## I64)
# if V8_HOST_ARCH_64_BIT
# define V8_INTPTR_C(x) (x ## I64)
# define V8_PTR_PREFIX "ll"
# else
# define V8_INTPTR_C(x) (x)
# define V8_PTR_PREFIX ""
# endif // V8_HOST_ARCH_64_BIT
#elif V8_CC_MINGW64
# define V8_UINT64_C(x) (x ## ULL)
# define V8_INT64_C(x) (x ## LL)
# define V8_INTPTR_C(x) (x ## LL)
# define V8_PTR_PREFIX "I64"
#elif V8_HOST_ARCH_64_BIT
# if V8_OS_MACOSX || V8_OS_OPENBSD
# define V8_UINT64_C(x) (x ## ULL)
# define V8_INT64_C(x) (x ## LL)
# else
# define V8_UINT64_C(x) (x ## UL)
# define V8_INT64_C(x) (x ## L)
# endif
# define V8_INTPTR_C(x) (x ## L)
# define V8_PTR_PREFIX "l"
#else
# define V8_UINT64_C(x) (x ## ULL)
# define V8_INT64_C(x) (x ## LL)
# define V8_INTPTR_C(x) (x)
#if V8_OS_AIX
#define V8_PTR_PREFIX "l"
#else
@ -329,4 +318,24 @@ inline void* AlignedAddress(void* address, size_t alignment) {
~static_cast<uintptr_t>(alignment - 1));
}

// Bounds checks for float to integer conversions, which does truncation. Hence,
// the range of legal values is (min - 1, max + 1).
template <typename int_t, typename float_t, typename biggest_int_t = int64_t>
bool is_inbounds(float_t v) {
static_assert(sizeof(int_t) < sizeof(biggest_int_t),
"int_t can't be bounds checked by the compiler");
constexpr float_t kLowerBound =
static_cast<float_t>(std::numeric_limits<int_t>::min()) - 1;
constexpr float_t kUpperBound =
static_cast<float_t>(std::numeric_limits<int_t>::max()) + 1;
constexpr bool kLowerBoundIsMin =
static_cast<biggest_int_t>(kLowerBound) ==
static_cast<biggest_int_t>(std::numeric_limits<int_t>::min());
constexpr bool kUpperBoundIsMax =
static_cast<biggest_int_t>(kUpperBound) ==
static_cast<biggest_int_t>(std::numeric_limits<int_t>::max());
return (kLowerBoundIsMin ? (kLowerBound <= v) : (kLowerBound < v)) &&
(kUpperBoundIsMax ? (v <= kUpperBound) : (v < kUpperBound));
}

#endif // V8_BASE_MACROS_H_
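is_inbounds asks whether truncating a floating-point value to int_t stays in range; per the comment, the legal open interval is (min - 1, max + 1) because truncation rounds toward zero. A quick illustration of its semantics for int8_t (assuming the helper is visible via the header above):

  #include <cstdint>
  #include <cstdio>

  #include "src/base/macros.h"  // is_inbounds, added above

  int main() {
    // Legal float inputs for int8_t form the open interval (-129.0, 128.0).
    std::printf("%d\n", is_inbounds<int8_t>(127.9f));   // 1: truncates to 127
    std::printf("%d\n", is_inbounds<int8_t>(128.0f));   // 0: out of range
    std::printf("%d\n", is_inbounds<int8_t>(-128.9f));  // 1: truncates to -128
  }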
4
deps/v8/src/base/once.cc
vendored
@ -15,7 +15,7 @@
namespace v8 {
namespace base {

void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
void CallOnceImpl(OnceType* once, std::function<void()> init_func) {
AtomicWord state = Acquire_Load(once);
// Fast path. The provided function was already executed.
if (state == ONCE_STATE_DONE) {
@ -34,7 +34,7 @@ void CallOnceImpl(OnceType* once, PointerArgFunction init_func, void* arg) {
if (state == ONCE_STATE_UNINITIALIZED) {
// We are the first thread to call this function, so we have to call the
// function.
init_func(arg);
init_func();
Release_Store(once, ONCE_STATE_DONE);
} else {
// Another thread has already started executing the function. We need to
11
deps/v8/src/base/once.h
vendored
@ -53,6 +53,7 @@
#define V8_BASE_ONCE_H_

#include <stddef.h>
#include <functional>

#include "src/base/atomicops.h"
#include "src/base/base-export.h"
@ -80,13 +81,12 @@ struct OneArgFunction {
typedef void (*type)(T);
};

V8_BASE_EXPORT void CallOnceImpl(OnceType* once, PointerArgFunction init_func,
void* arg);
V8_BASE_EXPORT void CallOnceImpl(OnceType* once,
std::function<void()> init_func);

inline void CallOnce(OnceType* once, NoArgFunction init_func) {
if (Acquire_Load(once) != ONCE_STATE_DONE) {
CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
nullptr);
CallOnceImpl(once, init_func);
}
}

@ -95,8 +95,7 @@ template <typename Arg>
inline void CallOnce(OnceType* once,
typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
if (Acquire_Load(once) != ONCE_STATE_DONE) {
CallOnceImpl(once, reinterpret_cast<PointerArgFunction>(init_func),
static_cast<void*>(arg));
CallOnceImpl(once, [=]() { init_func(arg); });
}
}

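With CallOnceImpl taking std::function<void()>, the no-arg overload passes the function directly and the one-arg overload captures (init_func, arg) in a lambda, replacing the old reinterpret_cast through PointerArgFunction. Call sites are unchanged; a sketch (assuming the V8_DECLARE_ONCE helper from this header):

  #include "src/base/once.h"

  V8_DECLARE_ONCE(table_once);  // a v8::base::OnceType flag

  static void InitTable(int* table) { table[0] = 1; }

  void EnsureTable(int* table) {
    // Exactly one thread runs InitTable(table); racing threads wait until
    // the state becomes ONCE_STATE_DONE, later calls return immediately.
    v8::base::CallOnce(&table_once, InitTable, table);
  }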
64
deps/v8/src/base/page-allocator.cc
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
// Copyright 2017 the V8 project authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "src/base/page-allocator.h"
|
||||
|
||||
#include "src/base/platform/platform.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace base {
|
||||
|
||||
#define STATIC_ASSERT_ENUM(a, b) \
|
||||
static_assert(static_cast<int>(a) == static_cast<int>(b), \
|
||||
"mismatching enum: " #a)
|
||||
|
||||
STATIC_ASSERT_ENUM(PageAllocator::kNoAccess,
|
||||
base::OS::MemoryPermission::kNoAccess);
|
||||
STATIC_ASSERT_ENUM(PageAllocator::kReadWrite,
|
||||
base::OS::MemoryPermission::kReadWrite);
|
||||
STATIC_ASSERT_ENUM(PageAllocator::kReadWriteExecute,
|
||||
base::OS::MemoryPermission::kReadWriteExecute);
|
||||
STATIC_ASSERT_ENUM(PageAllocator::kReadExecute,
|
||||
base::OS::MemoryPermission::kReadExecute);
|
||||
|
||||
#undef STATIC_ASSERT_ENUM
|
||||
|
||||
size_t PageAllocator::AllocatePageSize() {
|
||||
return base::OS::AllocatePageSize();
|
||||
}
|
||||
|
||||
size_t PageAllocator::CommitPageSize() { return base::OS::CommitPageSize(); }
|
||||
|
||||
void PageAllocator::SetRandomMmapSeed(int64_t seed) {
|
||||
base::OS::SetRandomMmapSeed(seed);
|
||||
}
|
||||
|
||||
void* PageAllocator::GetRandomMmapAddr() {
|
||||
return base::OS::GetRandomMmapAddr();
|
||||
}
|
||||
|
||||
void* PageAllocator::AllocatePages(void* address, size_t size, size_t alignment,
|
||||
PageAllocator::Permission access) {
|
||||
return base::OS::Allocate(address, size, alignment,
|
||||
static_cast<base::OS::MemoryPermission>(access));
|
||||
}
|
||||
|
||||
bool PageAllocator::FreePages(void* address, size_t size) {
|
||||
return base::OS::Free(address, size);
|
||||
}
|
||||
|
||||
bool PageAllocator::ReleasePages(void* address, size_t size, size_t new_size) {
|
||||
DCHECK_LT(new_size, size);
|
||||
return base::OS::Release(reinterpret_cast<uint8_t*>(address) + new_size,
|
||||
size - new_size);
|
||||
}
|
||||
|
||||
bool PageAllocator::SetPermissions(void* address, size_t size,
|
||||
PageAllocator::Permission access) {
|
||||
return base::OS::SetPermissions(
|
||||
address, size, static_cast<base::OS::MemoryPermission>(access));
|
||||
}
|
||||
|
||||
} // namespace base
|
||||
} // namespace v8
|
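Note: the new base::PageAllocator is a thin adapter that forwards every v8::PageAllocator virtual to the matching base::OS static, relying on STATIC_ASSERT_ENUM to prove the two permission enums line up so the static_cast between them is safe. A standalone toy version of that idiom (the enum names here are invented, not V8's API):

    #include <iostream>

    // Invented stand-ins for the two layers being kept in sync.
    namespace api { enum Permission { kNoAccess, kReadWrite }; }
    namespace os  { enum class MemoryPermission { kNoAccess, kReadWrite }; }

    // Same idiom as V8's STATIC_ASSERT_ENUM: a mismatch between the public
    // enum and the internal one becomes a compile error, which is what makes
    // the adapter's static_cast safe.
    #define STATIC_ASSERT_ENUM(a, b)                            \
      static_assert(static_cast<int>(a) == static_cast<int>(b), \
                    "mismatching enum: " #a)

    STATIC_ASSERT_ENUM(api::kNoAccess, os::MemoryPermission::kNoAccess);
    STATIC_ASSERT_ENUM(api::kReadWrite, os::MemoryPermission::kReadWrite);
    #undef STATIC_ASSERT_ENUM

    int main() {
      // The cast is now known-safe at compile time.
      auto p = static_cast<os::MemoryPermission>(api::kReadWrite);
      std::cout << static_cast<int>(p) << "\n";  // 1
    }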
41
deps/v8/src/base/page-allocator.h
vendored
Normal file
@ -0,0 +1,41 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASE_PAGE_ALLOCATOR_H_
#define V8_BASE_PAGE_ALLOCATOR_H_

#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"

namespace v8 {
namespace base {

class V8_BASE_EXPORT PageAllocator
    : public NON_EXPORTED_BASE(::v8::PageAllocator) {
 public:
  virtual ~PageAllocator() = default;

  size_t AllocatePageSize() override;

  size_t CommitPageSize() override;

  void SetRandomMmapSeed(int64_t seed) override;

  void* GetRandomMmapAddr() override;

  void* AllocatePages(void* address, size_t size, size_t alignment,
                      PageAllocator::Permission access) override;

  bool FreePages(void* address, size_t size) override;

  bool ReleasePages(void* address, size_t size, size_t new_size) override;

  bool SetPermissions(void* address, size_t size,
                      PageAllocator::Permission access) override;
};

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_PAGE_ALLOCATOR_H_
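Note: ReleasePages (declared above, implemented in page-allocator.cc) keeps the first new_size bytes of a mapping and returns the tail to the OS. A small sketch of just that pointer arithmetic, with invented numbers:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Mirror of ReleasePages' arithmetic: keep [address, address + new_size),
      // hand [address + new_size, address + size) back to the OS.
      const uintptr_t address = 0x100000;  // invented base of an 8-page region
      const size_t size = 8 * 4096;        // current region size
      const size_t new_size = 3 * 4096;    // caller still needs the first 3 pages

      const uintptr_t release_start = address + new_size;
      const size_t release_length = size - new_size;
      // Prints: release 20480 bytes starting at 0x103000
      std::printf("release %zu bytes starting at 0x%jx\n", release_length,
                  static_cast<uintmax_t>(release_start));
    }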
@ -124,12 +124,11 @@ bool OS::HasLazyCommits() {
}

std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
  return std::vector<SharedLibraryAddress>();
  UNREACHABLE();  // TODO(scottmg): Port, https://crbug.com/731217.
}

void OS::SignalCodeMovingGC() {
  CHECK(false);  // TODO(scottmg): Port, https://crbug.com/731217.
  UNREACHABLE();  // TODO(scottmg): Port, https://crbug.com/731217.
}

}  // namespace base
58
deps/v8/src/base/platform/platform-posix.cc
vendored
@ -89,6 +89,7 @@ const char* g_gc_fake_mmap = nullptr;

static LazyInstance<RandomNumberGenerator>::type
    platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;

#if !V8_OS_FUCHSIA
#if V8_OS_MACOSX
@ -130,11 +131,9 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) {
}

void* Allocate(void* address, size_t size, OS::MemoryPermission access) {
  const size_t actual_size = RoundUp(size, OS::AllocatePageSize());
  int prot = GetProtectionFromMemoryPermission(access);
  int flags = GetFlagsForMemoryPermission(access);
  void* result =
      mmap(address, actual_size, prot, flags, kMmapFd, kMmapFdOffset);
  void* result = mmap(address, size, prot, flags, kMmapFd, kMmapFdOffset);
  if (result == MAP_FAILED) return nullptr;
  return result;
}
@ -167,11 +166,7 @@ int ReclaimInaccessibleMemory(void* address, size_t size) {

}  // namespace

void OS::Initialize(int64_t random_seed, bool hard_abort,
                    const char* const gc_fake_mmap) {
  if (random_seed) {
    platform_random_number_generator.Pointer()->SetSeed(random_seed);
  }
void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
  g_hard_abort = hard_abort;
  g_gc_fake_mmap = gc_fake_mmap;
}
@ -206,46 +201,61 @@ size_t OS::CommitPageSize() {
  return page_size;
}

// static
void OS::SetRandomMmapSeed(int64_t seed) {
  if (seed) {
    LockGuard<Mutex> guard(rng_mutex.Pointer());
    platform_random_number_generator.Pointer()->SetSeed(seed);
  }
}

// static
void* OS::GetRandomMmapAddr() {
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
    defined(THREAD_SANITIZER)
  // Dynamic tools do not support custom mmap addresses.
  return nullptr;
#endif
  uintptr_t raw_addr;
  platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
                                                        sizeof(raw_addr));
  {
    LockGuard<Mutex> guard(rng_mutex.Pointer());
    platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
                                                          sizeof(raw_addr));
  }
#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
    defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
  // If random hint addresses interfere with address ranges hard coded in
  // sanitizers, bad things happen. This address range is copied from TSAN
  // source but works with all tools.
  // See crbug.com/539863.
  raw_addr &= 0x007fffff0000ULL;
  raw_addr += 0x7e8000000000ULL;
#else
#if V8_TARGET_ARCH_X64
  // Currently available CPUs have 48 bits of virtual addressing. Truncate
  // the hint address to 46 bits to give the kernel a fighting chance of
  // fulfilling our placement request.
  raw_addr &= V8_UINT64_C(0x3ffffffff000);
  raw_addr &= uint64_t{0x3FFFFFFFF000};
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
  // AIX: 64 bits of virtual addressing, but we limit address range to:
  //   a) minimize Segment Lookaside Buffer (SLB) misses and
  raw_addr &= V8_UINT64_C(0x3ffff000);
  raw_addr &= uint64_t{0x3FFFF000};
  // Use extra address space to isolate the mmap regions.
  raw_addr += V8_UINT64_C(0x400000000000);
  raw_addr += uint64_t{0x400000000000};
#elif V8_TARGET_BIG_ENDIAN
  // Big-endian Linux: 44 bits of virtual addressing.
  raw_addr &= V8_UINT64_C(0x03fffffff000);
  raw_addr &= uint64_t{0x03FFFFFFF000};
#else
  // Little-endian Linux: 48 bits of virtual addressing.
  raw_addr &= V8_UINT64_C(0x3ffffffff000);
  raw_addr &= uint64_t{0x3FFFFFFFF000};
#endif
#elif V8_TARGET_ARCH_S390X
  // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
  // of virtual addressing. Truncate to 40 bits to allow kernel chance to
  // fulfill request.
  raw_addr &= V8_UINT64_C(0xfffffff000);
  raw_addr &= uint64_t{0xFFFFFFF000};
#elif V8_TARGET_ARCH_S390
  // 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
  // to fulfill request.
  raw_addr &= 0x1ffff000;
  raw_addr &= 0x1FFFF000;
#else
  raw_addr &= 0x3ffff000;
  raw_addr &= 0x3FFFF000;

#ifdef __sun
  // For our Solaris/illumos mmap hint, we pick a random address in the bottom
@ -268,6 +278,7 @@ void* OS::GetRandomMmapAddr() {
  // 10.6 and 10.7.
  raw_addr += 0x20000000;
#endif
#endif
#endif
  return reinterpret_cast<void*>(raw_addr);
}
@ -283,6 +294,7 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
  address = AlignedAddress(address, alignment);
  // Add the maximum misalignment so we are guaranteed an aligned base address.
  size_t request_size = size + (alignment - page_size);
  request_size = RoundUp(request_size, OS::AllocatePageSize());
  void* result = base::Allocate(address, request_size, access);
  if (result == nullptr) return nullptr;
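Note on OS::Allocate: padding the request by (alignment - page_size) is the standard over-allocate-then-trim trick; it guarantees that an alignment-aligned base of the requested size fits somewhere inside the returned region. A standalone sketch of just the arithmetic (the addresses and sizes are invented):

    #include <cstdint>
    #include <cstdio>

    // Round x up to the next multiple of a power-of-two boundary.
    uintptr_t RoundUpTo(uintptr_t x, uintptr_t boundary) {
      return (x + boundary - 1) & ~(boundary - 1);
    }

    int main() {
      const uintptr_t page_size = 4096;
      const uintptr_t alignment = 65536;  // want a 64 KiB-aligned base
      const uintptr_t size = 128 * 1024;

      // As in OS::Allocate: pad the request by the maximum possible
      // misalignment, so an aligned base must fall inside the region.
      const uintptr_t request_size = size + (alignment - page_size);

      // Suppose mmap returned this page-aligned but not 64 KiB-aligned base:
      const uintptr_t base = 0x7f0000003000;
      const uintptr_t aligned = RoundUpTo(base, alignment);

      // The aligned block of `size` bytes fits inside [base, base+request_size).
      std::printf("waste before: %zu bytes, fits: %d\n",
                  static_cast<size_t>(aligned - base),
                  aligned + size <= base + request_size);  // fits: 1
    }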
33
deps/v8/src/base/platform/platform-win32.cc
vendored
@ -674,8 +674,15 @@ void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
#undef _TRUNCATE
#undef STRUNCATE

// The allocation alignment is the guaranteed alignment for
// VirtualAlloc'ed blocks of memory.
static LazyInstance<RandomNumberGenerator>::type
    platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;

void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
  g_hard_abort = hard_abort;
}

// static
size_t OS::AllocatePageSize() {
  static size_t allocate_alignment = 0;
  if (allocate_alignment == 0) {
@ -686,6 +693,7 @@ size_t OS::AllocatePageSize() {
  return allocate_alignment;
}

// static
size_t OS::CommitPageSize() {
  static size_t page_size = 0;
  if (page_size == 0) {
@ -697,17 +705,15 @@ size_t OS::CommitPageSize() {
  return page_size;
}

static LazyInstance<RandomNumberGenerator>::type
    platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;

void OS::Initialize(int64_t random_seed, bool hard_abort,
                    const char* const gc_fake_mmap) {
  if (random_seed) {
    platform_random_number_generator.Pointer()->SetSeed(random_seed);
// static
void OS::SetRandomMmapSeed(int64_t seed) {
  if (seed) {
    LockGuard<Mutex> guard(rng_mutex.Pointer());
    platform_random_number_generator.Pointer()->SetSeed(seed);
  }
  g_hard_abort = hard_abort;
}

// static
void* OS::GetRandomMmapAddr() {
  // The address range used to randomize RWX allocations in OS::Allocate
  // Try not to map pages into the default range that windows loads DLLs
@ -722,8 +728,11 @@ void* OS::GetRandomMmapAddr() {
  static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
  uintptr_t address;
  platform_random_number_generator.Pointer()->NextBytes(&address,
                                                        sizeof(address));
  {
    LockGuard<Mutex> guard(rng_mutex.Pointer());
    platform_random_number_generator.Pointer()->NextBytes(&address,
                                                          sizeof(address));
  }
  address <<= kPageSizeBits;
  address += kAllocationRandomAddressMin;
  address &= kAllocationRandomAddressMax;
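Note: both the POSIX and Windows ports now take rng_mutex around every SetSeed/NextBytes call, since the lazily created RandomNumberGenerator is shared process-wide and mutates its state on every draw. A standalone sketch of the guarded-hint pattern (std::mt19937_64 stands in for V8's generator; the constants are invented):

    #include <cstdint>
    #include <cstdio>
    #include <mutex>
    #include <random>

    namespace {
    std::mutex rng_mutex;           // guards the shared generator
    std::mt19937_64 rng{0xC0FFEE};  // stand-in for V8's RNG

    constexpr uintptr_t kPageSizeBits = 12;
    constexpr uintptr_t kAddressMin = 0x0000000080000000;  // invented range
    constexpr uintptr_t kAddressMax = 0x00003FFFFFFF0000;
    }  // namespace

    // Produce a page-aligned random hint, serializing access to the RNG the
    // same way the patched GetRandomMmapAddr does.
    void* GetRandomMmapAddrSketch() {
      uintptr_t address;
      {
        std::lock_guard<std::mutex> guard(rng_mutex);
        address = rng();  // only the RNG call happens under the lock
      }
      address <<= kPageSizeBits;  // page-align the raw randomness
      address += kAddressMin;     // shift toward the allowed window
      address &= kAddressMax;     // and mask down into it
      return reinterpret_cast<void*>(address);
    }

    int main() {
      std::printf("%p\n%p\n", GetRandomMmapAddrSketch(),
                  GetRandomMmapAddrSketch());
    }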
68
deps/v8/src/base/platform/platform.h
vendored
@ -36,6 +36,7 @@
#endif

namespace v8 {

namespace base {

// ----------------------------------------------------------------------------
@ -93,10 +94,9 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {

#endif  // V8_NO_FAST_TLS


class PageAllocator;
class TimezoneCache;


// ----------------------------------------------------------------------------
// OS
//
@ -107,11 +107,9 @@ class TimezoneCache;
class V8_BASE_EXPORT OS {
 public:
  // Initialize the OS class.
  // - random_seed: Used for the GetRandomMmapAddress() if non-zero.
  // - hard_abort: If true, OS::Abort() will crash instead of aborting.
  // - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
  static void Initialize(int64_t random_seed, bool hard_abort,
                         const char* const gc_fake_mmap);
  static void Initialize(bool hard_abort, const char* const gc_fake_mmap);

  // Returns the accumulated user time for thread. This routine
  // can be used for profiling. The implementation should
@ -157,6 +155,8 @@ class V8_BASE_EXPORT OS {
  static PRINTF_FORMAT(1, 2) void PrintError(const char* format, ...);
  static PRINTF_FORMAT(1, 0) void VPrintError(const char* format, va_list args);

  // Memory permissions. These should be kept in sync with the ones in
  // v8::PageAllocator.
  enum class MemoryPermission {
    kNoAccess,
    kReadWrite,
@ -165,40 +165,6 @@ class V8_BASE_EXPORT OS {
    kReadExecute
  };

  // Gets the page granularity for Allocate. Addresses returned by Allocate are
  // aligned to this size.
  static size_t AllocatePageSize();

  // Gets the granularity at which the permissions and commit calls can be made.
  static size_t CommitPageSize();

  // Generate a random address to be used for hinting allocation calls.
  static void* GetRandomMmapAddr();

  // Allocates memory. Permissions are set according to the access argument.
  // The address parameter is a hint. The size and alignment parameters must be
  // multiples of AllocatePageSize(). Returns the address of the allocated
  // memory, with the specified size and alignment, or nullptr on failure.
  V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
                                              size_t alignment,
                                              MemoryPermission access);

  // Frees memory allocated by a call to Allocate. address and size must be
  // multiples of AllocatePageSize(). Returns true on success, otherwise false.
  V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);

  // Releases memory that is no longer needed. The range specified by address
  // and size must be part of an allocated memory region, and must be multiples
  // of CommitPageSize(). Released memory is left in an undefined state, so it
  // should not be accessed. Returns true on success, otherwise false.
  V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);

  // Sets permissions according to the access argument. address and size must be
  // multiples of CommitPageSize(). Setting permission to kNoAccess may cause
  // the memory contents to be lost. Returns true on success, otherwise false.
  V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
                                                   MemoryPermission access);

  static bool HasLazyCommits();

  // Sleep for a specified time interval.
@ -280,6 +246,30 @@ class V8_BASE_EXPORT OS {
  static int GetCurrentThreadId();

 private:
  // These classes use the private memory management API below.
  friend class MemoryMappedFile;
  friend class PosixMemoryMappedFile;
  friend class v8::base::PageAllocator;

  static size_t AllocatePageSize();

  static size_t CommitPageSize();

  static void SetRandomMmapSeed(int64_t seed);

  static void* GetRandomMmapAddr();

  V8_WARN_UNUSED_RESULT static void* Allocate(void* address, size_t size,
                                              size_t alignment,
                                              MemoryPermission access);

  V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size);

  V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size);

  V8_WARN_UNUSED_RESULT static bool SetPermissions(void* address, size_t size,
                                                   MemoryPermission access);

  static const int msPerSecond = 1000;

#if V8_OS_POSIX
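Note: this header change is the point of the refactor: the page-level memory API moves into OS's private section, and base::PageAllocator (plus the memory-mapped-file helpers) are befriended as the only sanctioned callers. A toy illustration of the friend-gateway idiom (all names invented):

    #include <iostream>

    class Gateway;  // forward declaration for the friend grant

    class OSLike {
     public:
      static void PublicThing() { std::cout << "public\n"; }

     private:
      // Only Gateway may call this now; other callers get a compile error.
      friend class Gateway;
      static void AllocateLike() { std::cout << "private alloc\n"; }
    };

    class Gateway {
     public:
      // The adapter forwards to the private static, as PageAllocator
      // forwards to OS.
      void Allocate() { OSLike::AllocateLike(); }
    };

    int main() {
      Gateway g;
      g.Allocate();  // OK: goes through the sanctioned gateway
      // OSLike::AllocateLike();  // would not compile: it is private
    }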
2
deps/v8/src/base/platform/semaphore.cc
vendored
@ -136,7 +136,7 @@ bool Semaphore::WaitFor(const TimeDelta& rel_time) {

Semaphore::Semaphore(int count) {
  DCHECK_GE(count, 0);
  native_handle_ = ::CreateSemaphoreA(nullptr, count, 0x7fffffff, nullptr);
  native_handle_ = ::CreateSemaphoreA(nullptr, count, 0x7FFFFFFF, nullptr);
  DCHECK_NOT_NULL(native_handle_);
}
3
deps/v8/src/base/platform/time.cc
vendored
@ -298,8 +298,7 @@ Time Time::NowFromSystemTime() {


// Time between windows epoch and standard epoch.
static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);

static const int64_t kTimeToEpochInMicroseconds = int64_t{11644473600000000};

Time Time::FromFiletime(FILETIME ft) {
  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
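Note: the constant is the offset between the Windows FILETIME epoch (1601-01-01) and the Unix epoch (1970-01-01), in microseconds; only its spelling changes here, from the V8_INT64_C macro to an int64_t brace literal. A quick standalone check of the value:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // 369 years between 1601 and 1970, of which 89 are leap years
      // (1700, 1800, and 1900 are not leap years under the Gregorian rule).
      const int64_t days = 369 * 365 + 89;          // 134774 days
      const int64_t seconds = days * 24 * 60 * 60;  // 11644473600
      const int64_t micros = seconds * 1000000;     // the constant above
      std::printf("%lld\n", static_cast<long long>(micros));
      // prints 11644473600000000
    }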
3
deps/v8/src/base/safe_conversions.h
vendored
@ -53,8 +53,7 @@ inline Dst saturated_cast(Src value) {

    // Should fail only on attempting to assign NaN to a saturated integer.
    case internal::RANGE_INVALID:
      CHECK(false);
      return std::numeric_limits<Dst>::max();
      UNREACHABLE();
  }

  UNREACHABLE();
@ -213,9 +213,9 @@ void RandomNumberGenerator::SetSeed(int64_t seed) {

uint64_t RandomNumberGenerator::MurmurHash3(uint64_t h) {
  h ^= h >> 33;
  h *= V8_UINT64_C(0xFF51AFD7ED558CCD);
  h *= uint64_t{0xFF51AFD7ED558CCD};
  h ^= h >> 33;
  h *= V8_UINT64_C(0xC4CEB9FE1A85EC53);
  h *= uint64_t{0xC4CEB9FE1A85EC53};
  h ^= h >> 33;
  return h;
}
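Note: MurmurHash3 here is the 64-bit finalizer (often called fmix64): alternating xor-shifts with multiplications by two odd constants, so that every input bit influences every output bit. A standalone copy to experiment with:

    #include <cstdint>
    #include <cstdio>

    // MurmurHash3's 64-bit finalizer, as used above to mix RNG seeds.
    uint64_t MurmurHash3Fmix64(uint64_t h) {
      h ^= h >> 33;
      h *= uint64_t{0xFF51AFD7ED558CCD};
      h ^= h >> 33;
      h *= uint64_t{0xC4CEB9FE1A85EC53};
      h ^= h >> 33;
      return h;
    }

    int main() {
      // Nearby inputs produce wildly different outputs (avalanche effect).
      std::printf("%016llx\n", (unsigned long long)MurmurHash3Fmix64(1));
      std::printf("%016llx\n", (unsigned long long)MurmurHash3Fmix64(2));
    }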
@ -113,8 +113,8 @@ class V8_BASE_EXPORT RandomNumberGenerator final {
  // Static and exposed for external use.
  static inline double ToDouble(uint64_t state0, uint64_t state1) {
    // Exponent for double values for [1.0 .. 2.0)
    static const uint64_t kExponentBits = V8_UINT64_C(0x3FF0000000000000);
    static const uint64_t kMantissaMask = V8_UINT64_C(0x000FFFFFFFFFFFFF);
    static const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};
    static const uint64_t kMantissaMask = uint64_t{0x000FFFFFFFFFFFFF};
    uint64_t random = ((state0 + state1) & kMantissaMask) | kExponentBits;
    return bit_cast<double>(random) - 1;
  }
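Note: ToDouble exploits the IEEE-754 double layout: forcing the exponent field to 0x3FF while filling the 52 mantissa bits with random state yields a uniform value in [1.0, 2.0), and subtracting 1 maps it to [0.0, 1.0). A standalone sketch of the trick (std::memcpy stands in for V8's bit_cast):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Portable stand-in for V8's bit_cast.
    double BitCastToDouble(uint64_t bits) {
      double d;
      std::memcpy(&d, &bits, sizeof(d));
      return d;
    }

    int main() {
      const uint64_t kExponentBits = uint64_t{0x3FF0000000000000};  // [1,2)
      const uint64_t kMantissaMask = uint64_t{0x000FFFFFFFFFFFFF};  // low 52 bits

      uint64_t raw = 0x123456789ABCDEF0;  // pretend this came from the RNG
      uint64_t random = (raw & kMantissaMask) | kExponentBits;
      std::printf("%f\n", BitCastToDouble(random) - 1);  // uniform in [0, 1)
    }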
Some files were not shown because too many files have changed in this diff.