deps: update V8 to 6.9.427.22

PR-URL: https://github.com/nodejs/node/pull/21983
Reviewed-By: Refael Ackermann <refack@gmail.com>
Reviewed-By: Gus Caplan <me@gus.host>
Reviewed-By: Ujjwal Sharma <usharma1998@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Michaël Zasso 2018-09-07 17:07:13 +02:00
parent 12ed7c94e5
commit 586db2414a
GPG Key ID: 770F7A9A5AE15600 (no known key found for this signature in database)
1543 changed files with 86067 additions and 72238 deletions


@ -1,2 +1,5 @@
# Automatically normalize line endings (to LF) for all text-based files.
* text=auto
* text=auto eol=lf
# Do not modify line endings for binary files (which are sometimes auto
# detected as text files by git).
*.png binary

deps/v8/.gitignore (vendored): 5 changed lines

@ -105,5 +105,10 @@ turbo*.dot
turbo*.json
v8.ignition_dispatches_table.json
/Default/
node_modules
tools/turbolizer/build
tools/turbolizer/.rpt2_cache
tools/turbolizer/deploy
!/third_party/jinja2
!/third_party/markupsafe

deps/v8/AUTHORS (vendored): 6 changed lines

@ -32,7 +32,6 @@ Facebook, Inc. <*@fb.com>
Facebook, Inc. <*@oculus.com>
Vewd Software AS <*@vewd.com>
Groupon <*@groupon.com>
Meteor Development Group <*@meteor.com>
Cloudflare, Inc. <*@cloudflare.com>
Aaron Bieber <deftly@gmail.com>
@ -50,7 +49,6 @@ Andrei Kashcha <anvaka@gmail.com>
Anna Henningsen <anna@addaleax.net>
Bangfu Tao <bangfu.tao@samsung.com>
Ben Coe <ben@npmjs.com>
Ben Newman <ben@meteor.com>
Ben Noordhuis <info@bnoordhuis.nl>
Benjamin Tan <demoneaux@gmail.com>
Bert Belder <bertbelder@gmail.com>
@ -75,6 +73,7 @@ Felix Geisendörfer <haimuiba@gmail.com>
Filipe David Manana <fdmanana@gmail.com>
Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Geoffrey Garside <ggarside@gmail.com>
Gergely Nagy <ngg@ngg.hu>
Gus Caplan <me@gus.host>
Gwang Yoon Hwang <ryumiel@company100.net>
Henrique Ferreiro <henrique.ferreiro@gmail.com>
@ -88,7 +87,6 @@ Jan de Mooij <jandemooij@gmail.com>
Jan Krems <jan.krems@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
James M Snell <jasnell@gmail.com>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
@ -151,6 +149,8 @@ Taketoshi Aono <brn@b6n.ch>
Teddy Katz <teddy.katz@gmail.com>
Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Tobias Burnus <burnus@net-b.de>
Tobias Nießen <tniessen@tnie.de>
Ujjwal Sharma <usharma1998@gmail.com>
Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>

deps/v8/BUILD.gn (vendored): 137 changed lines

@ -22,6 +22,13 @@ declare_args() {
# Print to stdout on Android.
v8_android_log_stdout = false
# Dynamically set an additional dependency from v8/custom_deps.
v8_custom_deps = ""
# Turns on deprecation warnings for HeapObject::GetIsolate,
# HeapObject::GetHeap, Handle(T* obj) and handle(T* obj).
v8_deprecate_get_isolate = false
# Turns on all V8 debug features. Enables running V8 in a pseudo debug mode
# within a release Chrome.
v8_enable_debugging_features = is_debug
@ -70,12 +77,17 @@ declare_args() {
# Enable embedded builtins.
# TODO(jgruber,v8:6666): Support ia32 and maybe MSVC.
# TODO(jgruber,v8:6666): Re-enable.
v8_enable_embedded_builtins = false
# TODO(jgruber,v8:6666): Enable for remaining architectures once performance
# regressions are addressed.
v8_enable_embedded_builtins =
v8_use_snapshot && v8_current_cpu == "x64" && (!is_win || is_clang)
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
# Enable pointer compression (sets -dV8_COMPRESS_POINTERS).
v8_enable_pointer_compression = false
# Interpreted regexp engine exists as a platform-independent alternative
# in which the regular expression is compiled to a bytecode.
v8_interpreted_regexp = false
@ -176,6 +188,9 @@ if (v8_check_microtasks_scopes_consistency == "") {
v8_enable_debugging_features || dcheck_always_on
}
assert(!v8_enable_embedded_builtins || v8_use_snapshot,
"Embedded builtins only work with snapshots")
# Specifies if the target build is a simulator build. Comparing target cpu
# with v8 target cpu to not affect simulator builds for making cross-compile
# snapshots.
@ -195,8 +210,10 @@ config("internal_config") {
"$target_gen_dir",
]
defines = []
if (is_component_build) {
defines = [ "BUILDING_V8_SHARED" ]
defines += [ "BUILDING_V8_SHARED" ]
}
}
@ -286,6 +303,9 @@ config("features") {
if (v8_enable_minor_mc) {
defines += [ "ENABLE_MINOR_MC" ]
}
if (v8_enable_pointer_compression) {
defines += [ "V8_COMPRESS_POINTERS" ]
}
if (v8_enable_object_print) {
defines += [ "OBJECT_PRINT" ]
}
@ -320,6 +340,9 @@ config("features") {
if (v8_imminent_deprecation_warnings) {
defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ]
}
if (v8_deprecate_get_isolate) {
defines += [ "DEPRECATE_GET_ISOLATE" ]
}
if (v8_enable_i18n_support) {
defines += [ "V8_INTL_SUPPORT" ]
}
@ -563,6 +586,10 @@ config("toolchain") {
v8_current_cpu == "mips64el") {
cflags += [ "-Wshorten-64-to-32" ]
}
if (v8_deprecate_get_isolate) {
cflags += [ "-Wno-error=deprecated" ]
}
}
if (is_win) {
@ -636,9 +663,6 @@ action("js2c") {
"src/js/prologue.js",
"src/js/array.js",
"src/js/typedarray.js",
"src/debug/mirrors.js",
"src/debug/debug.js",
"src/debug/liveedit.js",
]
outputs = [
@ -828,7 +852,6 @@ action("postmortem-metadata") {
"src/objects/js-regexp-string-iterator.h",
"src/objects/map.h",
"src/objects/map-inl.h",
"src/objects/scope-info.h",
"src/objects/script.h",
"src/objects/script-inl.h",
"src/objects/shared-function-info.h",
@ -848,7 +871,10 @@ action("postmortem-metadata") {
torque_files = [
"src/builtins/base.tq",
"src/builtins/array.tq",
"src/builtins/array-foreach.tq",
"src/builtins/array-sort.tq",
"src/builtins/typed-array.tq",
"src/builtins/data-view.tq",
"test/torque/test-torque.tq",
]
@ -856,6 +882,7 @@ torque_modules = [
"base",
"array",
"typed-array",
"data-view",
"test",
]
@ -906,7 +933,7 @@ action("run_torque") {
}
}
v8_source_set("torque_generated_core") {
v8_header_set("torque_generated_core") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
@ -927,6 +954,12 @@ v8_source_set("torque_generated_initializers") {
":run_torque",
]
if (v8_enable_i18n_support) {
public_deps = [
"//third_party/icu",
]
}
sources = []
foreach(module, torque_modules) {
sources += [
@ -1036,6 +1069,11 @@ template("run_mksnapshot") {
"--no-turbo-rewrite-far-jumps",
"--no-turbo-verify-allocation",
]
if (v8_enable_debugging_features && v8_enable_slow_dchecks) {
# mksnapshot only accepts this flag if ENABLE_SLOW_DCHECKS is defined.
args += [ "--no-enable-slow-asserts" ]
}
}
}
}
@ -1178,6 +1216,8 @@ if (v8_use_snapshot && !v8_use_external_startup_data) {
if (v8_enable_embedded_builtins) {
sources += [ "$target_gen_dir/embedded.cc" ]
} else {
sources += [ "src/snapshot/embedded-empty.cc" ]
}
if (use_jumbo_build == true) {
@ -1234,6 +1274,8 @@ if (v8_use_snapshot && v8_use_external_startup_data) {
]
}
}
} else {
sources += [ "src/snapshot/embedded-empty.cc" ]
}
configs = [ ":internal_config" ]
@ -1271,6 +1313,7 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-constructor-gen.h",
"src/builtins/builtins-constructor.h",
"src/builtins/builtins-conversion-gen.cc",
"src/builtins/builtins-data-view-gen.h",
"src/builtins/builtins-date-gen.cc",
"src/builtins/builtins-debug-gen.cc",
"src/builtins/builtins-function-gen.cc",
@ -1283,6 +1326,8 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-intl-gen.cc",
"src/builtins/builtins-iterator-gen.cc",
"src/builtins/builtins-iterator-gen.h",
"src/builtins/builtins-lazy-gen.cc",
"src/builtins/builtins-lazy-gen.h",
"src/builtins/builtins-math-gen.cc",
"src/builtins/builtins-math-gen.h",
"src/builtins/builtins-number-gen.cc",
@ -1394,6 +1439,11 @@ v8_source_set("v8_init") {
### gcmole(all) ###
"src/setup-isolate-full.cc",
]
if (v8_enable_i18n_support) {
public_deps = [
"//third_party/icu",
]
}
configs = [ ":internal_config" ]
}
@ -1484,8 +1534,6 @@ v8_source_set("v8_base") {
"src/ast/ast-value-factory.h",
"src/ast/ast.cc",
"src/ast/ast.h",
"src/ast/compile-time-value.cc",
"src/ast/compile-time-value.h",
"src/ast/context-slot-cache.cc",
"src/ast/context-slot-cache.h",
"src/ast/modules.cc",
@ -1540,7 +1588,6 @@ v8_source_set("v8_base") {
"src/builtins/builtins-sharedarraybuffer.cc",
"src/builtins/builtins-string.cc",
"src/builtins/builtins-symbol.cc",
"src/builtins/builtins-trace.cc",
"src/builtins/builtins-typed-array.cc",
"src/builtins/builtins-utils.h",
"src/builtins/builtins.cc",
@ -1566,13 +1613,12 @@ v8_source_set("v8_base") {
"src/code-stubs-utils.h",
"src/code-stubs.cc",
"src/code-stubs.h",
"src/code-tracer.h",
"src/codegen.cc",
"src/codegen.h",
"src/collector.h",
"src/compilation-cache.cc",
"src/compilation-cache.h",
"src/compilation-dependencies.cc",
"src/compilation-dependencies.h",
"src/compilation-statistics.cc",
"src/compilation-statistics.h",
"src/compiler-dispatcher/compiler-dispatcher-job.cc",
@ -1618,6 +1664,8 @@ v8_source_set("v8_base") {
"src/compiler/common-operator-reducer.h",
"src/compiler/common-operator.cc",
"src/compiler/common-operator.h",
"src/compiler/compilation-dependencies.cc",
"src/compiler/compilation-dependencies.h",
"src/compiler/compiler-source-position-table.cc",
"src/compiler/compiler-source-position-table.h",
"src/compiler/constant-folding-reducer.cc",
@ -1674,6 +1722,8 @@ v8_source_set("v8_base") {
"src/compiler/js-generic-lowering.h",
"src/compiler/js-graph.cc",
"src/compiler/js-graph.h",
"src/compiler/js-heap-broker.cc",
"src/compiler/js-heap-broker.h",
"src/compiler/js-inlining-heuristic.cc",
"src/compiler/js-inlining-heuristic.h",
"src/compiler/js-inlining.cc",
@ -1912,6 +1962,8 @@ v8_source_set("v8_base") {
"src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer.cc",
"src/heap/gc-tracer.h",
"src/heap/heap-controller.cc",
"src/heap/heap-controller.h",
"src/heap/heap-inl.h",
"src/heap/heap.cc",
"src/heap/heap.h",
@ -2087,6 +2139,7 @@ v8_source_set("v8_base") {
"src/objects/frame-array.h",
"src/objects/hash-table-inl.h",
"src/objects/hash-table.h",
"src/objects/intl-objects-inl.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-array-inl.h",
@ -2102,6 +2155,9 @@ v8_source_set("v8_base") {
"src/objects/js-regexp-string-iterator-inl.h",
"src/objects/js-regexp-string-iterator.h",
"src/objects/js-regexp.h",
"src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
"src/objects/js-relative-time-format.h",
"src/objects/literal-objects-inl.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
@ -2127,6 +2183,8 @@ v8_source_set("v8_base") {
"src/objects/promise.h",
"src/objects/property-descriptor-object-inl.h",
"src/objects/property-descriptor-object.h",
"src/objects/prototype-info-inl.h",
"src/objects/prototype-info.h",
"src/objects/regexp-match-info.h",
"src/objects/scope-info.cc",
"src/objects/scope-info.h",
@ -2159,7 +2217,6 @@ v8_source_set("v8_base") {
"src/parsing/parsing.cc",
"src/parsing/parsing.h",
"src/parsing/pattern-rewriter.cc",
"src/parsing/preparse-data-format.h",
"src/parsing/preparse-data.cc",
"src/parsing/preparse-data.h",
"src/parsing/preparsed-scope-data.cc",
@ -2235,6 +2292,8 @@ v8_source_set("v8_base") {
"src/register-configuration.cc",
"src/register-configuration.h",
"src/reglist.h",
"src/roots-inl.h",
"src/roots.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
"src/runtime/runtime-array.cc",
@ -2245,7 +2304,6 @@ v8_source_set("v8_base") {
"src/runtime/runtime-compiler.cc",
"src/runtime/runtime-date.cc",
"src/runtime/runtime-debug.cc",
"src/runtime/runtime-error.cc",
"src/runtime/runtime-forin.cc",
"src/runtime/runtime-function.cc",
"src/runtime/runtime-futex.cc",
@ -2254,7 +2312,6 @@ v8_source_set("v8_base") {
"src/runtime/runtime-interpreter.cc",
"src/runtime/runtime-intl.cc",
"src/runtime/runtime-literals.cc",
"src/runtime/runtime-liveedit.cc",
"src/runtime/runtime-maths.cc",
"src/runtime/runtime-module.cc",
"src/runtime/runtime-numbers.cc",
@ -2306,6 +2363,7 @@ v8_source_set("v8_base") {
"src/snapshot/partial-deserializer.h",
"src/snapshot/partial-serializer.cc",
"src/snapshot/partial-serializer.h",
"src/snapshot/references.h",
"src/snapshot/serializer-common.cc",
"src/snapshot/serializer-common.h",
"src/snapshot/serializer.cc",
@ -2352,6 +2410,8 @@ v8_source_set("v8_base") {
"src/trap-handler/handler-shared.cc",
"src/trap-handler/trap-handler-internal.h",
"src/trap-handler/trap-handler.h",
"src/turbo-assembler.cc",
"src/turbo-assembler.h",
"src/type-hints.cc",
"src/type-hints.h",
"src/unicode-cache-inl.h",
@ -2396,6 +2456,8 @@ v8_source_set("v8_base") {
"src/wasm/function-body-decoder.h",
"src/wasm/function-compiler.cc",
"src/wasm/function-compiler.h",
"src/wasm/jump-table-assembler.cc",
"src/wasm/jump-table-assembler.h",
"src/wasm/leb-helper.h",
"src/wasm/local-decl-encoder.cc",
"src/wasm/local-decl-encoder.h",
@ -2412,8 +2474,6 @@ v8_source_set("v8_base") {
"src/wasm/value-type.h",
"src/wasm/wasm-code-manager.cc",
"src/wasm/wasm-code-manager.h",
"src/wasm/wasm-code-specialization.cc",
"src/wasm/wasm-code-specialization.h",
"src/wasm/wasm-constants.h",
"src/wasm/wasm-debug.cc",
"src/wasm/wasm-engine.cc",
@ -2485,6 +2545,7 @@ v8_source_set("v8_base") {
"src/ia32/assembler-ia32.h",
"src/ia32/code-stubs-ia32.cc",
"src/ia32/codegen-ia32.cc",
"src/ia32/constants-ia32.h",
"src/ia32/cpu-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
"src/ia32/disasm-ia32.cc",
@ -2518,6 +2579,7 @@ v8_source_set("v8_base") {
"src/x64/assembler-x64.h",
"src/x64/code-stubs-x64.cc",
"src/x64/codegen-x64.cc",
"src/x64/constants-x64.h",
"src/x64/cpu-x64.cc",
"src/x64/deoptimizer-x64.cc",
"src/x64/disasm-x64.cc",
@ -2557,7 +2619,6 @@ v8_source_set("v8_base") {
"src/arm/frame-constants-arm.cc",
"src/arm/frame-constants-arm.h",
"src/arm/interface-descriptors-arm.cc",
"src/arm/interface-descriptors-arm.h",
"src/arm/macro-assembler-arm.cc",
"src/arm/macro-assembler-arm.h",
"src/arm/simulator-arm.cc",
@ -2598,7 +2659,6 @@ v8_source_set("v8_base") {
"src/arm64/instrument-arm64.cc",
"src/arm64/instrument-arm64.h",
"src/arm64/interface-descriptors-arm64.cc",
"src/arm64/interface-descriptors-arm64.h",
"src/arm64/macro-assembler-arm64-inl.h",
"src/arm64/macro-assembler-arm64.cc",
"src/arm64/macro-assembler-arm64.h",
@ -2768,11 +2828,15 @@ v8_source_set("v8_base") {
"src/char-predicates.cc",
"src/intl.cc",
"src/intl.h",
"src/objects/intl-objects-inl.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-locale-inl.h",
"src/objects/js-locale.cc",
"src/objects/js-locale.h",
"src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
"src/objects/js-relative-time-format.h",
"src/runtime/runtime-intl.cc",
]
}
@ -2816,6 +2880,7 @@ v8_component("v8_libbase") {
"src/base/ieee754.h",
"src/base/iterator.h",
"src/base/lazy-instance.h",
"src/base/list.h",
"src/base/logging.cc",
"src/base/logging.h",
"src/base/macros.h",
@ -2925,7 +2990,6 @@ v8_component("v8_libbase") {
"src/base/debug/stack_trace_fuchsia.cc",
"src/base/platform/platform-fuchsia.cc",
]
public_deps += [ "//third_party/fuchsia-sdk:launchpad" ]
} else if (is_mac) {
sources += [
"src/base/debug/stack_trace_posix.cc",
@ -3029,6 +3093,10 @@ v8_source_set("fuzzer_support") {
":v8_libbase",
":v8_libplatform",
]
if (v8_enable_i18n_support) {
public_deps += [ "//third_party/icu" ]
}
}
###############################################################################
@ -3047,7 +3115,6 @@ if (v8_monolithic) {
":v8_libbase",
":v8_libplatform",
":v8_libsampler",
"//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
@ -3075,7 +3142,6 @@ if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) {
":v8_libbase",
":v8_libplatform",
":v8_nosnapshot",
"//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
}
@ -3129,7 +3195,6 @@ if (current_toolchain == v8_snapshot_toolchain) {
deps = [
":v8_libbase",
"third_party/antlr4:antlr4",
"//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
@ -3170,6 +3235,11 @@ group("gn_all") {
"tools:gn_all",
]
if (v8_custom_deps != "") {
# Custom dependency from directory under v8/custom_deps.
deps += [ v8_custom_deps ]
}
if (want_v8_shell) {
deps += [ ":v8_shell" ]
}
@ -3285,6 +3355,8 @@ if (is_component_build) {
v8_executable("d8") {
sources = [
"$target_gen_dir/d8-js.cc",
"src/async-hooks-wrapper.cc",
"src/async-hooks-wrapper.h",
"src/d8-console.cc",
"src/d8-console.h",
"src/d8.cc",
@ -3303,7 +3375,6 @@ v8_executable("d8") {
":v8",
":v8_libbase",
":v8_libplatform",
"//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
@ -3340,7 +3411,6 @@ v8_executable("v8_hello_world") {
":v8",
":v8_libbase",
":v8_libplatform",
"//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
}
@ -3361,7 +3431,6 @@ v8_executable("v8_sample_process") {
":v8",
":v8_libbase",
":v8_libplatform",
"//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
}
@ -3383,7 +3452,6 @@ if (want_v8_shell) {
":v8",
":v8_libbase",
":v8_libplatform",
"//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
}
@ -3395,7 +3463,6 @@ template("v8_fuzzer") {
v8_executable("v8_simple_" + name) {
deps = [
":" + name,
"//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
@ -3508,6 +3575,12 @@ v8_source_set("wasm_module_runner") {
":torque_generated_core",
]
if (v8_enable_i18n_support) {
public_deps = [
"//third_party/icu",
]
}
configs = [
":external_config",
":internal_config_base",
@ -3585,6 +3658,12 @@ v8_source_set("lib_wasm_fuzzer_common") {
":torque_generated_core",
]
if (v8_enable_i18n_support) {
public_deps = [
"//third_party/icu",
]
}
configs = [
":external_config",
":internal_config_base",

deps/v8/ChangeLog (vendored): 2210 changed lines (file diff suppressed because it is too large)

deps/v8/DEPS (vendored): 30 changed lines

@ -12,17 +12,17 @@ vars = {
deps = {
'v8/build':
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'b5df2518f091eea3d358f30757dec3e33db56156',
Var('chromium_url') + '/chromium/src/build.git' + '@' + '7315579e388589b62236ad933f09afd1e838d234',
'v8/tools/gyp':
Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
'v8/third_party/depot_tools':
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '083eb25f9acbe034db94a1bd5c1659125b6ebf98',
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'fb734036f4b5ae6d5afc63cbfc41d3a5d1c29a82',
'v8/third_party/icu':
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'f61e46dbee9d539a32551493e3bcc1dea92f83ec',
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'a9a2bd3ee4f1d313651c5272252aaf2a3e7ed529',
'v8/third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '323cf32193caecbf074d1a0cb5b02b905f163e0f',
'v8/buildtools':
Var('chromium_url') + '/chromium/buildtools.git' + '@' + '94288c26d2ffe3aec9848c147839afee597acefd',
Var('chromium_url') + '/chromium/buildtools.git' + '@' + '0dd5c6f980d22be96b728155249df2da355989d9',
'v8/base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '211b3ed9d0481b4caddbee1322321b86a483ca1f',
'v8/third_party/android_ndk': {
@ -30,11 +30,11 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/android_tools': {
'url': Var('chromium_url') + '/android_tools.git' + '@' + 'c22a664c39af72dd8f89200220713dcad811300a',
'url': Var('chromium_url') + '/android_tools.git' + '@' + '130499e25286f4d56acafa252fee09f3cc595c49',
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
'url': Var('chromium_url') + '/catapult.git' + '@' + '49edbd3a2b582cbab0a912cb1989062e9b8453ff',
'url': Var('chromium_url') + '/catapult.git' + '@' + 'f5342c4cf3d3e85e43be84c22bdfd8ebff23ec70',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@ -42,31 +42,33 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/fuchsia-sdk': {
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + 'afac8ecd6300c9903009e6f233f61aae401aced6',
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '82277014aeccc89bae4d7a317813affa3f7de0ee',
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '08d5b1f33af8c18785fb8ca02792b5fac81e248f',
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'ce468a17c434e4e79724396ee1b51d86bfc8a88b',
'v8/third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '45571de473282bd1d8b63a8dfcb1fd268d0635d2',
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/third_party/proguard':
Var('chromium_url') + '/chromium/src/third_party/proguard.git' + '@' + 'eba7a98d98735b2cc65c54d36baa5c9b46fe4f8e',
'v8/tools/swarming_client':
Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '833f5ebf894be1e3e6d13678d5de8479bf12ff28',
Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '9a518d097dca20b7b00ce3bdfc5d418ccc79893a',
'v8/test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '0192e0d70e2295fb590f14865da42f0f9dfa64bd',
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a6c1d05ac4fed084fa047e4c52ab2a8c9c2a8aef',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
'v8/tools/clang':
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'c893c7eec4706f8c7fc244ee254b1dadd8f8d158',
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'c0b1d892b2bc1291eb287d716ca239c1b03fb215',
'v8/tools/luci-go':
Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + 'ff0709d4283b1f233dcf0c9fec1672c6ecaed2f1',
Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + 'abcd908f74fdb155cc8870f5cae48dff1ece7c3c',
'v8/test/wasm-js':
Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '27d63f22e72395248d314520b3ad5b1e0943fc10',
Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '2113ea7e106f8a964e0445ba38f289d2aa845edd',
}
recursedeps = [

deps/v8/OWNERS (vendored): 4 changed lines

@ -10,10 +10,10 @@ clemensh@chromium.org
danno@chromium.org
delphick@chromium.org
eholk@chromium.org
franzih@chromium.org
gdeepti@chromium.org
gsathya@chromium.org
hablich@chromium.org
herhut@chromium.org
hpayer@chromium.org
ishell@chromium.org
jarin@chromium.org
@ -22,8 +22,10 @@ jkummerow@chromium.org
kschimpf@chromium.org
leszeks@chromium.org
machenbach@chromium.org
mathias@chromium.org
marja@chromium.org
mlippautz@chromium.org
mslekova@chromium.org
mstarzinger@chromium.org
mvstanton@chromium.org
mythria@chromium.org


@ -96,7 +96,9 @@ def _V8PresubmitChecks(input_api, output_api):
input_api.AffectedFiles(include_deletes=True)):
results.append(output_api.PresubmitError("Status file check failed"))
results.extend(input_api.canned_checks.CheckAuthorizedAuthor(
input_api, output_api))
input_api, output_api, bot_whitelist=[
'v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com'
]))
return results

deps/v8/custom_deps/.gitignore (vendored, new file): 4 changed lines

@ -0,0 +1,4 @@
*
!.gitignore
!OWNERS
!README.md

deps/v8/custom_deps/OWNERS (vendored, new file): 2 changed lines

@ -0,0 +1,2 @@
machenbach@chromium.org
sergiyb@chromium.org

deps/v8/custom_deps/README.md (vendored, new file): 2 changed lines

@ -0,0 +1,2 @@
Common directory for custom dependencies pulled in via .gclient custom_deps.
All subdirectories are ignored by git by default.

deps/v8/gni/v8.gni (vendored): 7 changed lines

@ -38,7 +38,7 @@ declare_args() {
v8_use_snapshot = !(is_win && host_os != "win" && target_cpu == "x64")
# Enable several snapshots side-by-side (e.g. default and for trusted code).
v8_use_multi_snapshots = ""
v8_use_multi_snapshots = false
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
@ -61,7 +61,10 @@ if (v8_use_external_startup_data == "") {
v8_use_external_startup_data = v8_use_snapshot && !is_ios
}
if (v8_use_multi_snapshots == "") {
if (v8_use_multi_snapshots) {
# Silently disable multi snapshots if they're incompatible with the current
# build configuration. This allows us to set v8_use_multi_snapshots=true on
# all bots, and e.g. no-snapshot bots will automatically do the right thing.
v8_use_multi_snapshots =
v8_use_external_startup_data && !build_with_chromium && !use_jumbo_build
}


@ -54,7 +54,7 @@ namespace v8 {
*/
class V8_EXPORT TracingCpuProfiler {
public:
V8_DEPRECATE_SOON(
V8_DEPRECATED(
"The profiler is created automatically with the isolate.\n"
"No need to create it explicitly.",
static std::unique_ptr<TracingCpuProfiler> Create(Isolate*));


@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 6
#define V8_MINOR_VERSION 8
#define V8_BUILD_NUMBER 275
#define V8_PATCH_LEVEL 32
#define V8_MINOR_VERSION 9
#define V8_BUILD_NUMBER 427
#define V8_PATCH_LEVEL 22
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

deps/v8/include/v8.h (vendored): 340 changed lines

@ -71,7 +71,6 @@ class BigIntObject;
class Boolean;
class BooleanObject;
class Context;
class CpuProfiler;
class Data;
class Date;
class External;
@ -146,6 +145,8 @@ class DeferredHandles;
class Heap;
class HeapObject;
class Isolate;
class LocalEmbedderHeapTracer;
class NeverReadOnlySpaceObject;
class Object;
struct ScriptStreamingData;
template<typename T> class CustomArguments;
@ -154,6 +155,7 @@ class FunctionCallbackArguments;
class GlobalHandles;
namespace wasm {
class CompilationResultResolver;
class StreamingDecoder;
} // namespace wasm
@ -176,18 +178,18 @@ const int kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
template <size_t ptr_size>
template <size_t tagged_ptr_size>
struct SmiTagging;
template <int kSmiShiftSize>
V8_INLINE internal::Object* IntToSmi(int value) {
int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
uintptr_t tagged_value =
(static_cast<uintptr_t>(value) << smi_shift_bits) | kSmiTag;
intptr_t tagged_value =
(static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
return reinterpret_cast<internal::Object*>(tagged_value);
}
// Smi constants for 32-bit systems.
// Smi constants for systems where tagged pointer is a 32-bit value.
template <>
struct SmiTagging<4> {
enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
@ -217,7 +219,7 @@ struct SmiTagging<4> {
}
};
// Smi constants for 64-bit systems.
// Smi constants for systems where tagged pointer is a 64-bit value.
template <>
struct SmiTagging<8> {
enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
@ -237,7 +239,15 @@ struct SmiTagging<8> {
}
};
#if V8_COMPRESS_POINTERS
static_assert(
kApiPointerSize == kApiInt64Size,
"Pointer compression can be enabled only for 64-bit architectures");
typedef SmiTagging<4> PlatformSmiTagging;
#else
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
#endif
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
const int kSmiMinValue = (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
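
The hunk above re-words the Smi comments in terms of tagged-pointer size so that pointer compression (V8_COMPRESS_POINTERS) can select the 32-bit tagging scheme even on 64-bit builds, and IntToSmi now builds the tagged value as a signed intptr_t. As a rough illustration (not part of the commit), this standalone sketch replays the 64-bit (SmiTagging<8>) shift-and-tag arithmetic using the constants shown above:

#include <cstdint>
#include <cstdio>

// Constants from the hunk above, 64-bit case (SmiTagging<8>).
constexpr int kSmiTag = 0;
constexpr int kSmiTagSize = 1;
constexpr int kSmiShiftSize = 31;

// Mirrors internal::IntToSmi: the 32-bit payload lives in the upper half of
// the word, the low bit carries the (zero) Smi tag.
intptr_t IntToSmi(int value) {
  const int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
  return (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
}

int SmiToInt(intptr_t smi) {
  // An arithmetic right shift recovers the signed payload.
  return static_cast<int>(smi >> (kSmiTagSize + kSmiShiftSize));
}

int main() {
  intptr_t smi = IntToSmi(42);
  std::printf("42 encodes as %#llx and decodes back to %d\n",
              static_cast<unsigned long long>(smi), SmiToInt(smi));
}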
@ -985,8 +995,8 @@ class V8_EXPORT HandleScope {
void operator delete[](void*, size_t);
// Uses heap_object to obtain the current Isolate.
static internal::Object** CreateHandle(internal::HeapObject* heap_object,
internal::Object* value);
static internal::Object** CreateHandle(
internal::NeverReadOnlySpaceObject* heap_object, internal::Object* value);
internal::Isolate* isolate_;
internal::Object** prev_next_;
@ -1022,6 +1032,11 @@ class V8_EXPORT EscapableHandleScope : public HandleScope {
return Local<T>(reinterpret_cast<T*>(slot));
}
template <class T>
V8_INLINE MaybeLocal<T> EscapeMaybe(MaybeLocal<T> value) {
return Escape(value.FromMaybe(Local<T>()));
}
EscapableHandleScope(const EscapableHandleScope&) = delete;
void operator=(const EscapableHandleScope&) = delete;
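
The EscapeMaybe() addition above lets a MaybeLocal result be escaped from an EscapableHandleScope in one step instead of unwrapping it first. A minimal sketch, assuming an already initialized isolate and context; RunInOwnScope is an illustrative name, not from this commit:

#include "v8.h"

// Illustrative helper: run a script inside its own handle scope and hand the
// (possibly empty) result back to the caller's scope in one call.
v8::MaybeLocal<v8::Value> RunInOwnScope(v8::Isolate* isolate,
                                        v8::Local<v8::Context> context,
                                        v8::Local<v8::Script> script) {
  v8::EscapableHandleScope scope(isolate);
  // Previously this needed FromMaybe(Local<Value>()) plus a plain Escape().
  return scope.EscapeMaybe(script->Run(context));
}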
@ -1108,8 +1123,12 @@ class V8_EXPORT PrimitiveArray {
public:
static Local<PrimitiveArray> New(Isolate* isolate, int length);
int Length() const;
void Set(int index, Local<Primitive> item);
Local<Primitive> Get(int index);
void Set(Isolate* isolate, int index, Local<Primitive> item);
Local<Primitive> Get(Isolate* isolate, int index);
V8_DEPRECATE_SOON("Use Isolate version",
void Set(int index, Local<Primitive> item));
V8_DEPRECATE_SOON("Use Isolate version", Local<Primitive> Get(int index));
};
/**
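
The PrimitiveArray hunk above deprecates the index-only Set/Get overloads in favour of Isolate-taking ones, a pattern this update applies to several APIs (StringObject::New further down gets the same treatment). A small illustrative migration sketch; FillWith is a made-up helper name:

#include "v8.h"

// Made-up helper: fill every slot of a PrimitiveArray using the new
// Isolate-taking overloads instead of the now-deprecated index-only ones.
void FillWith(v8::Isolate* isolate, v8::Local<v8::PrimitiveArray> array,
              v8::Local<v8::Primitive> value) {
  for (int i = 0; i < array->Length(); ++i) {
    array->Set(isolate, i, value);  // was: array->Set(i, value)
  }
  v8::Local<v8::Primitive> first = array->Get(isolate, 0);  // was: array->Get(0)
  (void)first;
}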
@ -1468,6 +1487,10 @@ class V8_EXPORT ScriptCompiler {
* more than two data chunks. The embedder can avoid this problem by always
* returning at least 2 bytes of data.
*
* When streaming UTF-16 data, V8 does not handle characters split between
* two data chunks. The embedder has to make sure that chunks have an even
* length.
*
* If the embedder wants to cancel the streaming, they should make the next
* GetMoreData call return 0. V8 will interpret it as end of data (and most
* probably, parsing will fail). The streaming task will return as soon as
@ -1651,7 +1674,9 @@ class V8_EXPORT ScriptCompiler {
* ECMAScript specification.
*/
static V8_WARN_UNUSED_RESULT MaybeLocal<Module> CompileModule(
Isolate* isolate, Source* source);
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions,
NoCacheReason no_cache_reason = kNoCacheNoReason);
/**
* Compile a function for a given context. This is equivalent to running
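
The CompileModule() change above only appends optional parameters, so existing callers compile unchanged; spelled out with the new defaults, a call looks like the sketch below (building a module-flagged ScriptCompiler::Source is assumed boilerplate and omitted):

#include "v8.h"

// Illustrative wrapper that passes the new defaults explicitly.
v8::MaybeLocal<v8::Module> CompileModuleWithDefaults(
    v8::Isolate* isolate, v8::ScriptCompiler::Source* source) {
  return v8::ScriptCompiler::CompileModule(
      isolate, source, v8::ScriptCompiler::kNoCompileOptions,
      v8::ScriptCompiler::kNoCacheNoReason);
}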
@ -4349,10 +4374,29 @@ class V8_EXPORT Proxy : public Object {
class V8_EXPORT WasmCompiledModule : public Object {
public:
typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> SerializedModule;
// The COMMA macro allows us to use ',' inside of the V8_DEPRECATE_SOON macro.
#define COMMA ,
V8_DEPRECATE_SOON(
"Use BufferReference.",
typedef std::pair<const uint8_t * COMMA size_t> CallerOwnedBuffer);
#undef COMMA
/**
* A buffer that is owned by the caller.
* An unowned reference to a byte buffer.
*/
typedef std::pair<const uint8_t*, size_t> CallerOwnedBuffer;
struct BufferReference {
const uint8_t* start;
size_t size;
BufferReference(const uint8_t* start, size_t size)
: start(start), size(size) {}
// Temporarily allow conversion to and from CallerOwnedBuffer.
V8_DEPRECATE_SOON(
"Use BufferReference directly.",
inline BufferReference(CallerOwnedBuffer)); // NOLINT(runtime/explicit)
V8_DEPRECATE_SOON("Use BufferReference directly.",
inline operator CallerOwnedBuffer());
};
/**
* An opaque, native heap object for transferring wasm modules. It
@ -4369,7 +4413,7 @@ class V8_EXPORT WasmCompiledModule : public Object {
private:
typedef std::pair<std::unique_ptr<const uint8_t[]>, size_t> OwnedBuffer;
friend class WasmCompiledModule;
TransferrableModule(OwnedBuffer&& code, OwnedBuffer&& bytes)
TransferrableModule(OwnedBuffer code, OwnedBuffer bytes)
: compiled_code(std::move(code)), wire_bytes(std::move(bytes)) {}
OwnedBuffer compiled_code = {nullptr, 0};
@ -4393,7 +4437,9 @@ class V8_EXPORT WasmCompiledModule : public Object {
/**
* Get the wasm-encoded bytes that were used to compile this module.
*/
Local<String> GetWasmWireBytes();
BufferReference GetWasmWireBytesRef();
V8_DEPRECATE_SOON("Use GetWasmWireBytesRef version.",
Local<String> GetWasmWireBytes());
/**
* Serialize the compiled module. The serialized data does not include the
@ -4406,18 +4452,18 @@ class V8_EXPORT WasmCompiledModule : public Object {
* uncompiled bytes.
*/
static MaybeLocal<WasmCompiledModule> DeserializeOrCompile(
Isolate* isolate, const CallerOwnedBuffer& serialized_module,
const CallerOwnedBuffer& wire_bytes);
Isolate* isolate, BufferReference serialized_module,
BufferReference wire_bytes);
V8_INLINE static WasmCompiledModule* Cast(Value* obj);
private:
static MaybeLocal<WasmCompiledModule> Deserialize(
Isolate* isolate, const CallerOwnedBuffer& serialized_module,
const CallerOwnedBuffer& wire_bytes);
Isolate* isolate, BufferReference serialized_module,
BufferReference wire_bytes);
static MaybeLocal<WasmCompiledModule> Compile(Isolate* isolate,
const uint8_t* start,
size_t length);
static CallerOwnedBuffer AsCallerOwned(
static BufferReference AsReference(
const TransferrableModule::OwnedBuffer& buff) {
return {buff.first.get(), buff.second};
}
@ -4426,6 +4472,61 @@ class V8_EXPORT WasmCompiledModule : public Object {
static void CheckCast(Value* obj);
};
// TODO(clemensh): Remove after M69 branch.
WasmCompiledModule::BufferReference::BufferReference(
WasmCompiledModule::CallerOwnedBuffer buf)
: BufferReference(buf.first, buf.second) {}
WasmCompiledModule::BufferReference::
operator WasmCompiledModule::CallerOwnedBuffer() {
return {start, size};
}
/**
* The V8 interface for WebAssembly streaming compilation. When streaming
* compilation is initiated, V8 passes a {WasmStreaming} object to the embedder
* such that the embedder can pass the input bytes for streaming compilation to
* V8.
*/
class V8_EXPORT WasmStreaming final {
public:
class WasmStreamingImpl;
WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl);
~WasmStreaming();
/**
* Pass a new chunk of bytes to WebAssembly streaming compilation.
* The buffer passed into {OnBytesReceived} is owned by the caller.
*/
void OnBytesReceived(const uint8_t* bytes, size_t size);
/**
* {Finish} should be called after all received bytes were passed to
* {OnBytesReceived} to tell V8 that there will be no more bytes. {Finish}
* does not have to be called after {Abort} has been called already.
*/
void Finish();
/**
* Abort streaming compilation. If {exception} has a value, then the promise
* associated with streaming compilation is rejected with that value. If
* {exception} does not have a value, the promise does not get rejected.
*/
void Abort(MaybeLocal<Value> exception);
/**
* Unpacks a {WasmStreaming} object wrapped in a {Managed} for the embedder.
* Since the embedder is on the other side of the API, it cannot unpack the
* {Managed} itself.
*/
static std::shared_ptr<WasmStreaming> Unpack(Isolate* isolate,
Local<Value> value);
private:
std::unique_ptr<WasmStreamingImpl> impl_;
};
// TODO(mtrofin): when streaming compilation is done, we can rename this
// to simply WasmModuleObjectBuilder
class V8_EXPORT WasmModuleObjectBuilderStreaming final {
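
The new WasmStreaming class above pairs with the WasmStreamingCallback typedef and Isolate::SetWasmStreamingCallback added further down in this file. The sketch below shows one plausible embedder side; the assumption that the wrapped {Managed} arrives as the callback's Data() argument is not stated in this header, and the bytes are dummy values:

#include <cstdint>
#include <memory>
#include "v8.h"

// Hedged sketch of a WebAssembly.compileStreaming handler. Assumes V8 hands
// the Managed<WasmStreaming> wrapper to the callback as its data argument so
// that Unpack() can recover it.
void MyWasmStreamingCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
  std::shared_ptr<v8::WasmStreaming> streaming =
      v8::WasmStreaming::Unpack(args.GetIsolate(), args.Data());
  const uint8_t magic[] = {0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00};
  streaming->OnBytesReceived(magic, sizeof(magic));  // feed chunks as they arrive
  streaming->Finish();  // or Abort(...) to reject the associated promise
}

// Registration on an initialized isolate:
//   isolate->SetWasmStreamingCallback(MyWasmStreamingCallback);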
@ -5137,7 +5238,9 @@ class V8_EXPORT BooleanObject : public Object {
*/
class V8_EXPORT StringObject : public Object {
public:
static Local<Value> New(Local<String> value);
static Local<Value> New(Isolate* isolate, Local<String> value);
static V8_DEPRECATE_SOON("Use Isolate* version",
Local<Value> New(Local<String> value));
Local<String> ValueOf() const;
@ -5891,26 +5994,6 @@ enum class PropertyHandlerFlags {
};
struct NamedPropertyHandlerConfiguration {
NamedPropertyHandlerConfiguration(
GenericNamedPropertyGetterCallback getter,
GenericNamedPropertySetterCallback setter,
GenericNamedPropertyQueryCallback query,
GenericNamedPropertyDeleterCallback deleter,
GenericNamedPropertyEnumeratorCallback enumerator,
GenericNamedPropertyDefinerCallback definer,
GenericNamedPropertyDescriptorCallback descriptor,
Local<Value> data = Local<Value>(),
PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
: getter(getter),
setter(setter),
query(query),
deleter(deleter),
enumerator(enumerator),
definer(definer),
descriptor(descriptor),
data(data),
flags(flags) {}
NamedPropertyHandlerConfiguration(
/** Note: getter is required */
GenericNamedPropertyGetterCallback getter = 0,
@ -5962,25 +6045,6 @@ struct NamedPropertyHandlerConfiguration {
struct IndexedPropertyHandlerConfiguration {
IndexedPropertyHandlerConfiguration(
IndexedPropertyGetterCallback getter,
IndexedPropertySetterCallback setter, IndexedPropertyQueryCallback query,
IndexedPropertyDeleterCallback deleter,
IndexedPropertyEnumeratorCallback enumerator,
IndexedPropertyDefinerCallback definer,
IndexedPropertyDescriptorCallback descriptor,
Local<Value> data = Local<Value>(),
PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
: getter(getter),
setter(setter),
query(query),
deleter(deleter),
enumerator(enumerator),
definer(definer),
descriptor(descriptor),
data(data),
flags(flags) {}
IndexedPropertyHandlerConfiguration(
/** Note: getter is required */
IndexedPropertyGetterCallback getter = 0,
@ -6298,7 +6362,8 @@ class V8_EXPORT AccessorSignature : public Data {
// --- Extensions ---
V8_DEPRECATE_SOON("Implementation detail",
class ExternalOneByteStringResourceImpl);
class V8_EXPORT ExternalOneByteStringResourceImpl
: public String::ExternalOneByteStringResource {
public:
@ -6325,7 +6390,7 @@ class V8_EXPORT Extension { // NOLINT
int dep_count = 0,
const char** deps = 0,
int source_length = -1);
virtual ~Extension() { }
virtual ~Extension() { delete source_; }
virtual Local<FunctionTemplate> GetNativeFunctionTemplate(
Isolate* isolate, Local<String> name) {
return Local<FunctionTemplate>();
@ -6334,7 +6399,8 @@ class V8_EXPORT Extension { // NOLINT
const char* name() const { return name_; }
size_t source_length() const { return source_length_; }
const String::ExternalOneByteStringResource* source() const {
return &source_; }
return source_;
}
int dependency_count() { return dep_count_; }
const char** dependencies() { return deps_; }
void set_auto_enable(bool value) { auto_enable_ = value; }
@ -6347,7 +6413,7 @@ class V8_EXPORT Extension { // NOLINT
private:
const char* name_;
size_t source_length_; // expected to initialize before source_
ExternalOneByteStringResourceImpl source_;
String::ExternalOneByteStringResource* source_;
int dep_count_;
const char** deps_;
bool auto_enable_;
@ -6679,6 +6745,9 @@ typedef bool (*AllowWasmCodeGenerationCallback)(Local<Context> context,
// by the embedder. Example: WebAssembly.{compile|instantiate}Streaming ---
typedef void (*ApiImplementationCallback)(const FunctionCallbackInfo<Value>&);
// --- Callback for WebAssembly.compileStreaming ---
typedef void (*WasmStreamingCallback)(const FunctionCallbackInfo<Value>&);
// --- Garbage Collection Callbacks ---
/**
@ -6919,6 +6988,8 @@ struct JitCodeEvent {
// New location of instructions. Only valid for CODE_MOVED.
void* new_code_start;
};
Isolate* isolate;
};
/**
@ -7040,6 +7111,12 @@ class V8_EXPORT EmbedderHeapTracer {
virtual bool AdvanceTracing(double deadline_in_ms,
AdvanceTracingActions actions) = 0;
/*
* Returns true if there is no more tracing work to be done (see AdvanceTracing)
* and false otherwise.
*/
virtual bool IsTracingDone() { return NumberOfWrappersToTrace() == 0; }
/**
* Called at the end of a GC cycle.
*
@ -7061,13 +7138,35 @@ class V8_EXPORT EmbedderHeapTracer {
*/
virtual void AbortTracing() = 0;
/*
* Called by the embedder to request immediate finalization of the currently
* running tracing phase that has been started with TracePrologue and not
* yet finished with TraceEpilogue.
*
* Will be a no-op when tracing is not currently in progress.
*
* This is an experimental feature.
*/
void FinalizeTracing();
/*
* Returns the v8::Isolate this tracer is attached to, and |nullptr| if it
* is not attached to any v8::Isolate.
*/
v8::Isolate* isolate() const { return isolate_; }
/**
* Returns the number of wrappers that are still to be traced by the embedder.
*/
virtual size_t NumberOfWrappersToTrace() { return 0; }
V8_DEPRECATE_SOON("Use IsTracingDone",
virtual size_t NumberOfWrappersToTrace() { return 0; });
protected:
virtual ~EmbedderHeapTracer() = default;
v8::Isolate* isolate_ = nullptr;
friend class internal::LocalEmbedderHeapTracer;
};
/**
@ -7361,6 +7460,7 @@ class V8_EXPORT Isolate {
kWebAssemblyInstantiation = 46,
kDeoptimizerDisableSpeculation = 47,
kArrayPrototypeSortJSArrayModifiedPrototype = 48,
kFunctionTokenOffsetTooLongForToString = 49,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
@ -7616,15 +7716,7 @@ class V8_EXPORT Isolate {
HeapProfiler* GetHeapProfiler();
/**
* Returns CPU profiler for this isolate. Will return NULL unless the isolate
* is initialized. It is the embedder's responsibility to stop all CPU
* profiling activities if it has started any.
*/
V8_DEPRECATED("CpuProfiler should be created with CpuProfiler::New call.",
CpuProfiler* GetCpuProfiler());
/**
* Tells the CPU profiler whether the embedder is idle.
* Tells the VM whether the embedder is idle or not.
*/
void SetIdle(bool is_idle);
@ -7703,6 +7795,85 @@ class V8_EXPORT Isolate {
*/
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
/**
* Use for |AtomicsWaitCallback| to indicate the type of event it receives.
*/
enum class AtomicsWaitEvent {
/** Indicates that this call is happening before waiting. */
kStartWait,
/** `Atomics.wait()` finished because of an `Atomics.wake()` call. */
kWokenUp,
/** `Atomics.wait()` finished because it timed out. */
kTimedOut,
/** `Atomics.wait()` was interrupted through |TerminateExecution()|. */
kTerminatedExecution,
/** `Atomics.wait()` was stopped through |AtomicsWaitWakeHandle|. */
kAPIStopped,
/** `Atomics.wait()` did not wait, as the initial condition was not met. */
kNotEqual
};
/**
* Passed to |AtomicsWaitCallback| as a means of stopping an ongoing
* `Atomics.wait` call.
*/
class V8_EXPORT AtomicsWaitWakeHandle {
public:
/**
* Stop this `Atomics.wait()` call and call the |AtomicsWaitCallback|
* with |kAPIStopped|.
*
* This function may be called from another thread. The caller has to ensure
* through proper synchronization that it is not called after
* the finishing |AtomicsWaitCallback|.
*
* Note that the ECMAScript specification does not plan for the possibility
* of wakeups that are neither coming from a timeout nor an `Atomics.wake()`
* call, so this may invalidate assumptions made by existing code.
* The embedder may accordingly wish to schedule an exception in the
* finishing |AtomicsWaitCallback|.
*/
void Wake();
};
/**
* Embedder callback for `Atomics.wait()` that can be added through
* |SetAtomicsWaitCallback|.
*
* This will be called just before starting to wait with the |event| value
* |kStartWait| and after finishing waiting with one of the other
* values of |AtomicsWaitEvent| inside of an `Atomics.wait()` call.
*
* |array_buffer| will refer to the underlying SharedArrayBuffer,
* |offset_in_bytes| to the location of the waited-on memory address inside
* the SharedArrayBuffer.
*
* |value| and |timeout_in_ms| will be the values passed to
* the `Atomics.wait()` call. If no timeout was used, |timeout_in_ms|
* will be `INFINITY`.
*
* In the |kStartWait| callback, |stop_handle| will be an object that
* is only valid until the corresponding finishing callback and that
* can be used to stop the wait process while it is happening.
*
* This callback may schedule exceptions, *unless* |event| is equal to
* |kTerminatedExecution|.
*/
typedef void (*AtomicsWaitCallback)(AtomicsWaitEvent event,
Local<SharedArrayBuffer> array_buffer,
size_t offset_in_bytes, int32_t value,
double timeout_in_ms,
AtomicsWaitWakeHandle* stop_handle,
void* data);
/**
* Set a new |AtomicsWaitCallback|. This overrides an earlier
* |AtomicsWaitCallback|, if there was any. If |callback| is nullptr,
* this unsets the callback. |data| will be passed to the callback
* as its last parameter.
*/
void SetAtomicsWaitCallback(AtomicsWaitCallback callback, void* data);
/**
* Enables the host application to receive a notification after a
* garbage collection. Allocations are allowed in the callback function,
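
The block above defines the full Atomics.wait() instrumentation surface: the AtomicsWaitEvent enum, the wake handle, the callback typedef and SetAtomicsWaitCallback. A minimal sketch of a callback that only counts started waits; the counter and the function name are illustrative, only the signature and the registration call come from the header above:

#include <cstddef>
#include <cstdint>
#include "v8.h"

// Illustrative AtomicsWaitCallback: counts how many Atomics.wait() calls start.
// |data| is whatever pointer was handed to SetAtomicsWaitCallback().
void CountAtomicsWaits(v8::Isolate::AtomicsWaitEvent event,
                       v8::Local<v8::SharedArrayBuffer> array_buffer,
                       size_t offset_in_bytes, int32_t value,
                       double timeout_in_ms,
                       v8::Isolate::AtomicsWaitWakeHandle* stop_handle,
                       void* data) {
  if (event == v8::Isolate::AtomicsWaitEvent::kStartWait) {
    ++*static_cast<int*>(data);
    // stop_handle->Wake() could be called from another thread to end the wait
    // early; the finishing callback would then see kAPIStopped.
  }
}

// Registration on an initialized isolate, with the counter passed as |data|:
//   static int wait_count = 0;
//   isolate->SetAtomicsWaitCallback(CountAtomicsWaits, &wait_count);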
@ -7956,6 +8127,18 @@ class V8_EXPORT Isolate {
*/
void IsolateInBackgroundNotification();
/**
* Optional notification which will enable the memory savings mode.
* V8 uses this notification to guide heuristics which may result in a
* smaller memory footprint at the cost of reduced runtime performance.
*/
void EnableMemorySavingsMode();
/**
* Optional notification which will disable the memory savings mode.
*/
void DisableMemorySavingsMode();
/**
* Optional notification to tell V8 the current performance requirements
* of the embedder based on RAIL.
@ -8079,6 +8262,8 @@ class V8_EXPORT Isolate {
void SetWasmCompileStreamingCallback(ApiImplementationCallback callback);
void SetWasmStreamingCallback(WasmStreamingCallback callback);
/**
* Check if V8 is dead and therefore unusable. This is the case after
* fatal errors such as out-of-memory situations.
@ -9927,7 +10112,6 @@ AccessorSignature* AccessorSignature::Cast(Data* data) {
Local<Value> Object::GetInternalField(int index) {
#ifndef V8_ENABLE_CHECKS
typedef internal::Object O;
typedef internal::HeapObject HO;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(this);
// Fast path: If the object is a plain JSObject, which is the common case, we
@ -9938,7 +10122,8 @@ Local<Value> Object::GetInternalField(int index) {
instance_type == I::kJSSpecialApiObjectType) {
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
O* value = I::ReadField<O*>(obj, offset);
O** result = HandleScope::CreateHandle(reinterpret_cast<HO*>(obj), value);
O** result = HandleScope::CreateHandle(
reinterpret_cast<internal::NeverReadOnlySpaceObject*>(obj), value);
return Local<Value>(reinterpret_cast<Value*>(result));
}
#endif
@ -10578,9 +10763,8 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
Local<Value> Context::GetEmbedderData(int index) {
#ifndef V8_ENABLE_CHECKS
typedef internal::Object O;
typedef internal::HeapObject HO;
typedef internal::Internals I;
HO* context = *reinterpret_cast<HO**>(this);
auto* context = *reinterpret_cast<internal::NeverReadOnlySpaceObject**>(this);
O** result =
HandleScope::CreateHandle(context, I::ReadEmbedderData<O*>(this, index));
return Local<Value>(reinterpret_cast<Value*>(result));


@ -111,7 +111,5 @@ verifiers {
}
}
}
sign_cla {}
}


@ -30,15 +30,27 @@
'ppc.debug': 'default_debug_ppc',
'ppc.optdebug': 'default_optdebug_ppc',
'ppc.release': 'default_release_ppc',
'ppc.debug.sim': 'default_debug_ppc_sim',
'ppc.optdebug.sim': 'default_optdebug_ppc_sim',
'ppc.release.sim': 'default_release_ppc_sim',
'ppc64.debug': 'default_debug_ppc64',
'ppc64.optdebug': 'default_optdebug_ppc64',
'ppc64.release': 'default_release_ppc64',
'ppc64.debug.sim': 'default_debug_ppc64_sim',
'ppc64.optdebug.sim': 'default_optdebug_ppc64_sim',
'ppc64.release.sim': 'default_release_ppc64_sim',
's390.debug': 'default_debug_s390',
's390.optdebug': 'default_optdebug_s390',
's390.release': 'default_release_s390',
's390.debug.sim': 'default_debug_s390_sim',
's390.optdebug.sim': 'default_optdebug_s390_sim',
's390.release.sim': 'default_release_s390_sim',
's390x.debug': 'default_debug_s390x',
's390x.optdebug': 'default_optdebug_s390x',
's390x.release': 'default_release_s390x',
's390x.debug.sim': 'default_debug_s390x_sim',
's390x.optdebug.sim': 'default_optdebug_s390x_sim',
's390x.release.sim': 'default_release_s390x_sim',
'x64.debug': 'default_debug_x64',
'x64.optdebug': 'default_optdebug_x64',
'x64.release': 'default_release_x64',
@ -95,6 +107,7 @@
'V8 Fuchsia': 'release_x64_fuchsia',
'V8 Fuchsia - debug': 'debug_x64_fuchsia',
'V8 Linux64 - cfi': 'release_x64_cfi',
'V8 Linux64 UBSan': 'release_x64_ubsan',
'V8 Linux64 UBSanVptr': 'release_x64_ubsan_vptr',
'V8 Linux - vtunejit': 'debug_x86_vtunejit',
'V8 Linux64 - gcov coverage': 'release_x64_gcc_coverage',
@ -285,28 +298,52 @@
'default_release_mips64el': [
'release', 'simulate_mips64el'],
'default_debug_ppc': [
'debug', 'simulate_ppc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'debug', 'ppc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc': [
'debug', 'simulate_ppc', 'v8_enable_slow_dchecks'],
'debug', 'ppc', 'v8_enable_slow_dchecks'],
'default_release_ppc': [
'release', 'ppc'],
'default_debug_ppc_sim': [
'debug', 'simulate_ppc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc_sim': [
'debug', 'simulate_ppc', 'v8_enable_slow_dchecks'],
'default_release_ppc_sim': [
'release', 'simulate_ppc'],
'default_debug_ppc64': [
'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'debug', 'ppc64', 'gcc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc64': [
'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks'],
'debug', 'ppc64', 'gcc', 'v8_enable_slow_dchecks'],
'default_release_ppc64': [
'release', 'ppc64', 'gcc'],
'default_debug_ppc64_sim': [
'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc64_sim': [
'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks'],
'default_release_ppc64_sim': [
'release', 'simulate_ppc64'],
'default_debug_s390': [
'debug', 'simulate_s390', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'debug', 's390', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_s390': [
'debug', 'simulate_s390', 'v8_enable_slow_dchecks'],
'debug', 's390', 'v8_enable_slow_dchecks'],
'default_release_s390': [
'release', 's390'],
'default_debug_s390_sim': [
'debug', 'simulate_s390', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_s390_sim': [
'debug', 'simulate_s390', 'v8_enable_slow_dchecks'],
'default_release_s390_sim': [
'release', 'simulate_s390'],
'default_debug_s390x': [
'debug', 'simulate_s390x', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'debug', 's390x', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_s390x': [
'debug', 'simulate_s390x', 'v8_enable_slow_dchecks'],
'debug', 's390x', 'v8_enable_slow_dchecks'],
'default_release_s390x': [
'release', 's390x'],
'default_debug_s390x_sim': [
'debug', 'simulate_s390x', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_s390x_sim': [
'debug', 'simulate_s390x', 'v8_enable_slow_dchecks'],
'default_release_s390x_sim': [
'release', 'simulate_s390x'],
'default_debug_x64': [
'debug', 'x64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
@ -417,8 +454,7 @@
'release_x64_gcc_coverage': [
'release_bot', 'x64', 'coverage', 'gcc', 'no_custom_libcxx', 'no_sysroot'],
'release_x64_internal': [
'release_bot', 'x64', 'v8_enable_embedded_builtins',
'v8_snapshot_internal'],
'release_bot', 'x64', 'v8_snapshot_internal'],
'release_x64_jumbo': [
'release_bot', 'x64', 'jumbo'],
'release_x64_jumbo_trybot': [
@ -444,6 +480,8 @@
'minimal_symbols'],
'release_x64_tsan_minimal_symbols': [
'release_bot', 'x64', 'tsan', 'minimal_symbols'],
'release_x64_ubsan': [
'release_bot', 'x64', 'ubsan'],
'release_x64_ubsan_vptr': [
'release_bot', 'x64', 'ubsan_vptr'],
'release_x64_ubsan_vptr_recover_edge': [
@ -452,7 +490,7 @@
'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols'],
'release_x64_verify_csa': [
'release_bot', 'x64', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_enable_embedded_builtins', 'v8_verify_csa'],
'v8_enable_slow_dchecks', 'v8_verify_csa'],
# Debug configs for x64.
'debug_x64': [
@ -588,10 +626,14 @@
'gn_args': 'is_debug=true v8_enable_backtrace=true',
},
'v8_use_multi_snapshots': {
'gn_args': 'v8_use_multi_snapshots=true',
},
'debug_bot': {
'mixins': [
'debug', 'shared', 'goma', 'v8_enable_slow_dchecks',
'v8_optimized_debug'],
'v8_use_multi_snapshots', 'v8_optimized_debug'],
},
'debug_trybot': {
@ -678,7 +720,7 @@
},
'release_bot': {
'mixins': ['release', 'static', 'goma'],
'mixins': ['release', 'static', 'goma', 'v8_use_multi_snapshots'],
},
'release_trybot': {
@ -736,6 +778,14 @@
'gn_args': 'is_tsan=true',
},
'ubsan': {
'mixins': ['v8_enable_test_features'],
# TODO(krasin): Remove is_ubsan_no_recover=true when
# https://llvm.org/bugs/show_bug.cgi?id=25569 is fixed and just use
# ubsan instead.
'gn_args': 'is_ubsan=true is_ubsan_no_recover=true',
},
'ubsan_vptr': {
'mixins': ['v8_enable_test_features'],
# TODO(krasin): Remove is_ubsan_no_recover=true when
@ -763,10 +813,6 @@
'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
},
'v8_enable_embedded_builtins': {
'gn_args': 'v8_enable_embedded_builtins=true',
},
'v8_enable_slow_dchecks': {
'gn_args': 'v8_enable_slow_dchecks=true',
},
@ -816,6 +862,22 @@
'gn_args': 'v8_enable_verify_csa=true',
},
's390': {
'gn_args': 'target_cpu="s390x" v8_target_cpu="s390"',
},
's390x': {
'gn_args': 'target_cpu="s390x" v8_target_cpu="s390x"',
},
'ppc': {
'gn_args': 'target_cpu="ppc"',
},
'ppc64': {
'gn_args': 'target_cpu="ppc64" use_custom_libcxx=false',
},
'x64': {
'gn_args': 'target_cpu="x64"',
},
@ -823,5 +885,6 @@
'x86': {
'gn_args': 'target_cpu="x86"',
},
},
}


@ -248,7 +248,7 @@
{'name': 'mjsunit_sp_frame_access'},
{'name': 'mozilla'},
{'name': 'test262'},
{'name': 'v8testing', 'shards': 7},
{'name': 'v8testing', 'shards': 9},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
],
},


@ -34,22 +34,63 @@ int main(int argc, char* argv[]) {
// Enter the context for compiling and running the hello world script.
v8::Context::Scope context_scope(context);
// Create a string containing the JavaScript source code.
v8::Local<v8::String> source =
v8::String::NewFromUtf8(isolate, "'Hello' + ', World!'",
v8::NewStringType::kNormal)
.ToLocalChecked();
{
// Create a string containing the JavaScript source code.
v8::Local<v8::String> source =
v8::String::NewFromUtf8(isolate, "'Hello' + ', World!'",
v8::NewStringType::kNormal)
.ToLocalChecked();
// Compile the source code.
v8::Local<v8::Script> script =
v8::Script::Compile(context, source).ToLocalChecked();
// Compile the source code.
v8::Local<v8::Script> script =
v8::Script::Compile(context, source).ToLocalChecked();
// Run the script to get the result.
v8::Local<v8::Value> result = script->Run(context).ToLocalChecked();
// Run the script to get the result.
v8::Local<v8::Value> result = script->Run(context).ToLocalChecked();
// Convert the result to an UTF8 string and print it.
v8::String::Utf8Value utf8(isolate, result);
printf("%s\n", *utf8);
// Convert the result to an UTF8 string and print it.
v8::String::Utf8Value utf8(isolate, result);
printf("%s\n", *utf8);
}
{
// Use the JavaScript API to generate a WebAssembly module.
//
// |bytes| contains the binary format for the following module:
//
// (func (export "add") (param i32 i32) (result i32)
// get_local 0
// get_local 1
// i32.add)
//
const char* csource = R"(
let bytes = new Uint8Array([
0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00, 0x01, 0x07, 0x01,
0x60, 0x02, 0x7f, 0x7f, 0x01, 0x7f, 0x03, 0x02, 0x01, 0x00, 0x07,
0x07, 0x01, 0x03, 0x61, 0x64, 0x64, 0x00, 0x00, 0x0a, 0x09, 0x01,
0x07, 0x00, 0x20, 0x00, 0x20, 0x01, 0x6a, 0x0b
]);
let module = new WebAssembly.Module(bytes);
let instance = new WebAssembly.Instance(module);
instance.exports.add(3, 4);
)";
// Create a string containing the JavaScript source code.
v8::Local<v8::String> source =
v8::String::NewFromUtf8(isolate, csource, v8::NewStringType::kNormal)
.ToLocalChecked();
// Compile the source code.
v8::Local<v8::Script> script =
v8::Script::Compile(context, source).ToLocalChecked();
// Run the script to get the result.
v8::Local<v8::Value> result = script->Run(context).ToLocalChecked();
// Convert the result to a uint32 and print it.
uint32_t number = result->Uint32Value(context).ToChecked();
printf("3 + 4 = %u\n", number);
}
}
// Dispose the isolate and tear down V8.


@ -13,6 +13,7 @@
#include "src/isolate-inl.h"
#include "src/messages.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/module-inl.h"
#include "src/property-details.h"
#include "src/prototype.h"
@ -45,11 +46,11 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
return info;
}
static V8_INLINE bool CheckForName(Handle<Name> name,
static V8_INLINE bool CheckForName(Isolate* isolate, Handle<Name> name,
Handle<String> property_name, int offset,
FieldIndex::Encoding encoding,
FieldIndex* index) {
if (Name::Equals(name, property_name)) {
if (Name::Equals(isolate, name, property_name)) {
*index = FieldIndex::ForInObjectOffset(offset, encoding);
return true;
}
@ -59,17 +60,15 @@ static V8_INLINE bool CheckForName(Handle<Name> name,
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
FieldIndex* index) {
Isolate* isolate = name->GetIsolate();
bool Accessors::IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
Handle<Name> name, FieldIndex* index) {
switch (map->instance_type()) {
case JS_ARRAY_TYPE:
return CheckForName(name, isolate->factory()->length_string(),
return CheckForName(isolate, name, isolate->factory()->length_string(),
JSArray::kLengthOffset, FieldIndex::kTagged, index);
default:
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return CheckForName(name, isolate->factory()->length_string(),
return CheckForName(isolate, name, isolate->factory()->length_string(),
String::kLengthOffset, FieldIndex::kTagged, index);
}
@ -169,7 +168,8 @@ void Accessors::ArrayLengthSetter(
RuntimeCallCounterId::kArrayLengthSetter);
HandleScope scope(isolate);
DCHECK(Utils::OpenHandle(*name)->SameValue(isolate->heap()->length_string()));
DCHECK(Utils::OpenHandle(*name)->SameValue(
ReadOnlyRoots(isolate).length_string()));
Handle<JSReceiver> object = Utils::OpenHandle(*info.Holder());
Handle<JSArray> array = Handle<JSArray>::cast(object);
@ -237,7 +237,8 @@ void Accessors::ModuleNamespaceEntryGetter(
JSModuleNamespace* holder =
JSModuleNamespace::cast(*Utils::OpenHandle(*info.Holder()));
Handle<Object> result;
if (!holder->GetExport(Handle<String>::cast(Utils::OpenHandle(*name)))
if (!holder
->GetExport(isolate, Handle<String>::cast(Utils::OpenHandle(*name)))
.ToHandle(&result)) {
isolate->OptionalRescheduleException(false);
} else {
@ -304,333 +305,6 @@ Handle<AccessorInfo> Accessors::MakeStringLengthInfo(Isolate* isolate) {
&StringLengthGetter, nullptr);
}
//
// Accessors::ScriptColumnOffset
//
void Accessors::ScriptColumnOffsetGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
Object* res = Smi::FromInt(
Script::cast(JSValue::cast(object)->value())->column_offset());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::MakeScriptColumnOffsetInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("column_offset")));
return MakeAccessor(isolate, name, &ScriptColumnOffsetGetter, nullptr);
}
//
// Accessors::ScriptId
//
void Accessors::ScriptIdGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
Object* id = Smi::FromInt(Script::cast(JSValue::cast(object)->value())->id());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(id, isolate)));
}
Handle<AccessorInfo> Accessors::MakeScriptIdInfo(Isolate* isolate) {
Handle<String> name(
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("id")));
return MakeAccessor(isolate, name, &ScriptIdGetter, nullptr);
}
//
// Accessors::ScriptName
//
void Accessors::ScriptNameGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
Object* source = Script::cast(JSValue::cast(object)->value())->name();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
Handle<AccessorInfo> Accessors::MakeScriptNameInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->name_string(),
&ScriptNameGetter, nullptr);
}
//
// Accessors::ScriptSource
//
void Accessors::ScriptSourceGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
Object* source = Script::cast(JSValue::cast(object)->value())->source();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
Handle<AccessorInfo> Accessors::MakeScriptSourceInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->source_string(),
&ScriptSourceGetter, nullptr);
}
//
// Accessors::ScriptLineOffset
//
void Accessors::ScriptLineOffsetGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
Object* res =
Smi::FromInt(Script::cast(JSValue::cast(object)->value())->line_offset());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::MakeScriptLineOffsetInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("line_offset")));
return MakeAccessor(isolate, name, &ScriptLineOffsetGetter, nullptr);
}
//
// Accessors::ScriptType
//
void Accessors::ScriptTypeGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
Object* res =
Smi::FromInt(Script::cast(JSValue::cast(object)->value())->type());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::MakeScriptTypeInfo(Isolate* isolate) {
Handle<String> name(
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("type")));
return MakeAccessor(isolate, name, &ScriptTypeGetter, nullptr);
}
//
// Accessors::ScriptCompilationType
//
void Accessors::ScriptCompilationTypeGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
Object* res = Smi::FromInt(
Script::cast(JSValue::cast(object)->value())->compilation_type());
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::MakeScriptCompilationTypeInfo(
Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("compilation_type")));
return MakeAccessor(isolate, name, &ScriptCompilationTypeGetter, nullptr);
}
//
// Accessors::ScriptSourceUrl
//
void Accessors::ScriptSourceUrlGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
Object* url = Script::cast(JSValue::cast(object)->value())->source_url();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
}
Handle<AccessorInfo> Accessors::MakeScriptSourceUrlInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("source_url")));
return MakeAccessor(isolate, name, &ScriptSourceUrlGetter, nullptr);
}
//
// Accessors::ScriptSourceMappingUrl
//
void Accessors::ScriptSourceMappingUrlGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
Object* url =
Script::cast(JSValue::cast(object)->value())->source_mapping_url();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
}
Handle<AccessorInfo> Accessors::MakeScriptSourceMappingUrlInfo(
Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("source_mapping_url")));
return MakeAccessor(isolate, name, &ScriptSourceMappingUrlGetter, nullptr);
}
//
// Accessors::ScriptGetContextData
//
void Accessors::ScriptContextDataGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* object = *Utils::OpenHandle(*info.Holder());
Object* res = Script::cast(JSValue::cast(object)->value())->context_data();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::MakeScriptContextDataInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("context_data")));
return MakeAccessor(isolate, name, &ScriptContextDataGetter, nullptr);
}
//
// Accessors::ScriptGetEvalFromScript
//
void Accessors::ScriptEvalFromScriptGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
if (script->has_eval_from_shared()) {
Handle<SharedFunctionInfo> eval_from_shared(script->eval_from_shared());
if (eval_from_shared->script()->IsScript()) {
Handle<Script> eval_from_script(Script::cast(eval_from_shared->script()));
result = Script::GetWrapper(eval_from_script);
}
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::MakeScriptEvalFromScriptInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_script")));
return MakeAccessor(isolate, name, &ScriptEvalFromScriptGetter, nullptr);
}
//
// Accessors::ScriptGetEvalFromScriptPosition
//
void Accessors::ScriptEvalFromScriptPositionGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
if (script->compilation_type() == Script::COMPILATION_TYPE_EVAL) {
result = Handle<Object>(Smi::FromInt(script->GetEvalPosition()), isolate);
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::MakeScriptEvalFromScriptPositionInfo(
Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_script_position")));
return MakeAccessor(isolate, name, &ScriptEvalFromScriptPositionGetter,
nullptr);
}
//
// Accessors::ScriptGetEvalFromFunctionName
//
void Accessors::ScriptEvalFromFunctionNameGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<Object> object = Utils::OpenHandle(*info.Holder());
Handle<Script> script(
Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
Handle<Object> result = isolate->factory()->undefined_value();
if (script->has_eval_from_shared()) {
Handle<SharedFunctionInfo> shared(script->eval_from_shared());
// Find the name of the function calling eval.
result = Handle<Object>(shared->Name(), isolate);
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::MakeScriptEvalFromFunctionNameInfo(
Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_function_name")));
return MakeAccessor(isolate, name, &ScriptEvalFromFunctionNameGetter,
nullptr);
}
//
// Accessors::FunctionPrototype
//
@ -824,7 +498,7 @@ Handle<JSObject> GetFrameArguments(Isolate* isolate,
// Generators currently use holes as dummy arguments when resuming. We
// must not leak those.
DCHECK(IsResumableFunction(function->shared()->kind()));
value = isolate->heap()->undefined_value();
value = ReadOnlyRoots(isolate).undefined_value();
}
array->set(i, value);
}
@ -1118,9 +792,9 @@ MaybeHandle<JSReceiver> ClearInternalStackTrace(Isolate* isolate,
Handle<JSObject> error) {
RETURN_ON_EXCEPTION(
isolate,
JSReceiver::SetProperty(error, isolate->factory()->stack_trace_symbol(),
isolate->factory()->undefined_value(),
LanguageMode::kStrict),
JSReceiver::SetProperty(
isolate, error, isolate->factory()->stack_trace_symbol(),
isolate->factory()->undefined_value(), LanguageMode::kStrict),
JSReceiver);
return error;
}
@ -1152,7 +826,7 @@ void Accessors::ErrorStackGetter(
Handle<Object> stack_trace;
Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
MaybeHandle<Object> maybe_stack_trace =
JSObject::GetProperty(holder, stack_trace_symbol);
JSObject::GetProperty(isolate, holder, stack_trace_symbol);
if (!maybe_stack_trace.ToHandle(&stack_trace) ||
stack_trace->IsUndefined(isolate)) {
Handle<Object> result = isolate->factory()->undefined_value();
@ -1191,7 +865,8 @@ void Accessors::ErrorStackGetter(
}
} else {
// The stack property has been modified in the meantime.
if (!JSObject::GetProperty(holder, name).ToHandle(&formatted_stack_trace)) {
if (!JSObject::GetProperty(isolate, holder, name)
.ToHandle(&formatted_stack_trace)) {
isolate->OptionalRescheduleException(false);
return;
}
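The hunks above all apply the same mechanical change: helpers such as JSObject::GetProperty and JSReceiver::SetProperty now take the Isolate* as an explicit first argument, and handles are created as handle(obj, isolate) rather than recovering the isolate from the object. A toy standalone sketch of that refactor shape (all names here are made up, not V8 API):

#include <cstdio>

struct Context { const char* name; };

struct Thing {
  Context* owner;  // old style: the callee would reach for thing->owner itself
  int length;
};

// New style: the context is threaded through explicitly, so the callee never
// has to look it up from the object.
int Length(Context* context, const Thing* thing) {
  std::printf("using %s\n", context->name);
  return thing->length;
}

int main() {
  Context context{"main-context"};
  Thing thing{&context, 42};
  std::printf("length = %d\n", Length(&context, &thing));
  return 0;
}
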

View File

@ -33,19 +33,6 @@ class JavaScriptFrame;
V(function_name, FunctionName) \
V(function_length, FunctionLength) \
V(function_prototype, FunctionPrototype) \
V(script_column_offset, ScriptColumnOffset) \
V(script_compilation_type, ScriptCompilationType) \
V(script_context_data, ScriptContextData) \
V(script_eval_from_script, ScriptEvalFromScript) \
V(script_eval_from_script_position, ScriptEvalFromScriptPosition) \
V(script_eval_from_function_name, ScriptEvalFromFunctionName) \
V(script_id, ScriptId) \
V(script_line_offset, ScriptLineOffset) \
V(script_name, ScriptName) \
V(script_source, ScriptSource) \
V(script_type, ScriptType) \
V(script_source_url, ScriptSourceUrl) \
V(script_source_mapping_url, ScriptSourceMappingUrl) \
V(string_length, StringLength)
#define SIDE_EFFECT_FREE_ACCESSOR_INFO_LIST(V) \
@ -106,7 +93,8 @@ class Accessors : public AllStatic {
// Returns true for properties that are accessors to object fields.
// If true, the matching FieldIndex is returned through |field_index|.
static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
static bool IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
Handle<Name> name,
FieldIndex* field_index);
static MaybeHandle<Object> ReplaceAccessorWithDataProperty(

View File

@ -69,181 +69,6 @@ class RootIndexMap {
DISALLOW_COPY_AND_ASSIGN(RootIndexMap);
};
class SerializerReference {
public:
SerializerReference() : bitfield_(Special(kInvalidValue)) {}
static SerializerReference FromBitfield(uint32_t bitfield) {
return SerializerReference(bitfield);
}
static SerializerReference BackReference(AllocationSpace space,
uint32_t chunk_index,
uint32_t chunk_offset) {
DCHECK(IsAligned(chunk_offset, kObjectAlignment));
DCHECK_NE(LO_SPACE, space);
return SerializerReference(
SpaceBits::encode(space) | ChunkIndexBits::encode(chunk_index) |
ChunkOffsetBits::encode(chunk_offset >> kObjectAlignmentBits));
}
static SerializerReference MapReference(uint32_t index) {
return SerializerReference(SpaceBits::encode(MAP_SPACE) |
ValueIndexBits::encode(index));
}
static SerializerReference OffHeapBackingStoreReference(uint32_t index) {
return SerializerReference(SpaceBits::encode(kExternalSpace) |
ValueIndexBits::encode(index));
}
static SerializerReference LargeObjectReference(uint32_t index) {
return SerializerReference(SpaceBits::encode(LO_SPACE) |
ValueIndexBits::encode(index));
}
static SerializerReference AttachedReference(uint32_t index) {
return SerializerReference(SpaceBits::encode(kAttachedReferenceSpace) |
ValueIndexBits::encode(index));
}
static SerializerReference DummyReference() {
return SerializerReference(Special(kDummyValue));
}
bool is_valid() const { return bitfield_ != Special(kInvalidValue); }
bool is_back_reference() const {
return SpaceBits::decode(bitfield_) <= LAST_SPACE;
}
AllocationSpace space() const {
DCHECK(is_back_reference());
return static_cast<AllocationSpace>(SpaceBits::decode(bitfield_));
}
uint32_t chunk_offset() const {
DCHECK(is_back_reference());
return ChunkOffsetBits::decode(bitfield_) << kObjectAlignmentBits;
}
uint32_t map_index() const {
DCHECK(is_back_reference());
return ValueIndexBits::decode(bitfield_);
}
bool is_off_heap_backing_store_reference() const {
return SpaceBits::decode(bitfield_) == kExternalSpace;
}
uint32_t off_heap_backing_store_index() const {
DCHECK(is_off_heap_backing_store_reference());
return ValueIndexBits::decode(bitfield_);
}
uint32_t large_object_index() const {
DCHECK(is_back_reference());
return ValueIndexBits::decode(bitfield_);
}
uint32_t chunk_index() const {
DCHECK(is_back_reference());
return ChunkIndexBits::decode(bitfield_);
}
uint32_t back_reference() const {
DCHECK(is_back_reference());
return bitfield_ & (ChunkOffsetBits::kMask | ChunkIndexBits::kMask);
}
bool is_attached_reference() const {
return SpaceBits::decode(bitfield_) == kAttachedReferenceSpace;
}
int attached_reference_index() const {
DCHECK(is_attached_reference());
return ValueIndexBits::decode(bitfield_);
}
private:
explicit SerializerReference(uint32_t bitfield) : bitfield_(bitfield) {}
inline static uint32_t Special(int value) {
return SpaceBits::encode(kSpecialValueSpace) |
ValueIndexBits::encode(value);
}
// We use the 32-bit bitfield to encode either a back reference, a special
// value, or an attached reference index.
// Back reference:
// [ Space index ] [ Chunk index ] [ Chunk offset ]
// [ LO_SPACE ] [ large object index ]
// Special value
// [ kSpecialValueSpace ] [ Special value index ]
// Attached reference
// [ kAttachedReferenceSpace ] [ Attached reference index ]
// External
// [ kExternalSpace ] [ External reference index ]
static const int kChunkOffsetSize = kPageSizeBits - kObjectAlignmentBits;
static const int kChunkIndexSize = 32 - kChunkOffsetSize - kSpaceTagSize;
static const int kValueIndexSize = kChunkOffsetSize + kChunkIndexSize;
static const int kSpecialValueSpace = LAST_SPACE + 1;
static const int kAttachedReferenceSpace = kSpecialValueSpace + 1;
static const int kExternalSpace = kAttachedReferenceSpace + 1;
STATIC_ASSERT(kExternalSpace < (1 << kSpaceTagSize));
static const int kInvalidValue = 0;
static const int kDummyValue = 1;
// The chunk offset can also be used to encode the index of special values.
class ChunkOffsetBits : public BitField<uint32_t, 0, kChunkOffsetSize> {};
class ChunkIndexBits
: public BitField<uint32_t, ChunkOffsetBits::kNext, kChunkIndexSize> {};
class ValueIndexBits : public BitField<uint32_t, 0, kValueIndexSize> {};
STATIC_ASSERT(ChunkIndexBits::kNext == ValueIndexBits::kNext);
class SpaceBits : public BitField<int, kValueIndexSize, kSpaceTagSize> {};
STATIC_ASSERT(SpaceBits::kNext == 32);
uint32_t bitfield_;
friend class SerializerReferenceMap;
};
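For reference, a standalone sketch of the packing described in the comments above, with made-up field widths (the real kChunkOffsetSize and kChunkIndexSize are derived from page size and object alignment): a back reference stores space, chunk index and chunk offset in one 32-bit word.

#include <cstdint>
#include <cstdio>

// Illustrative widths only: 17 + 12 + 3 = 32 bits.
constexpr int kOffsetBits = 17;
constexpr int kChunkBits = 12;
constexpr int kSpaceBits = 3;

uint32_t Encode(uint32_t space, uint32_t chunk, uint32_t offset_words) {
  return (space << (kOffsetBits + kChunkBits)) | (chunk << kOffsetBits) |
         offset_words;
}

int main() {
  uint32_t ref = Encode(/*space=*/2, /*chunk=*/5, /*offset_words=*/100);
  uint32_t offset = ref & ((1u << kOffsetBits) - 1);
  uint32_t chunk = (ref >> kOffsetBits) & ((1u << kChunkBits) - 1);
  uint32_t space = (ref >> (kOffsetBits + kChunkBits)) & ((1u << kSpaceBits) - 1);
  std::printf("space=%u chunk=%u offset=%u\n", space, chunk, offset);
  return 0;
}
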
// Mapping objects to their location after deserialization.
// This is used during building, but not at runtime by V8.
class SerializerReferenceMap {
public:
SerializerReferenceMap()
: no_allocation_(), map_(), attached_reference_index_(0) {}
SerializerReference Lookup(void* obj) {
Maybe<uint32_t> maybe_index = map_.Get(obj);
return maybe_index.IsJust() ? SerializerReference(maybe_index.FromJust())
: SerializerReference();
}
void Add(void* obj, SerializerReference b) {
DCHECK(b.is_valid());
DCHECK(map_.Get(obj).IsNothing());
map_.Set(obj, b.bitfield_);
}
SerializerReference AddAttachedReference(HeapObject* attached_reference) {
SerializerReference reference =
SerializerReference::AttachedReference(attached_reference_index_++);
Add(attached_reference, reference);
return reference;
}
private:
DisallowHeapAllocation no_allocation_;
PointerToIndexHashMap<void*> map_;
int attached_reference_index_;
DISALLOW_COPY_AND_ASSIGN(SerializerReferenceMap);
};
} // namespace internal
} // namespace v8

View File

@ -71,8 +71,8 @@ char* StrNDup(const char* str, int n);
// and free. Used as the default policy for lists.
class FreeStoreAllocationPolicy {
public:
INLINE(void* New(size_t size)) { return Malloced::New(size); }
INLINE(static void Delete(void* p)) { Malloced::Delete(p); }
V8_INLINE void* New(size_t size) { return Malloced::New(size); }
V8_INLINE static void Delete(void* p) { Malloced::Delete(p); }
};
// Performs a malloc, with retry logic on failure. Returns nullptr on failure.

View File

@ -85,7 +85,8 @@ Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
v8::FunctionCallback f =
v8::ToCData<v8::FunctionCallback>(handler->callback());
if (isolate->debug_execution_mode() == DebugInfo::kSideEffects &&
!isolate->debug()->PerformSideEffectCheckForCallback(handle(handler))) {
!isolate->debug()->PerformSideEffectCheckForCallback(
handle(handler, isolate))) {
return Handle<Object>();
}
VMState<EXTERNAL> state(isolate);

View File

@ -93,9 +93,9 @@ class PropertyCallbackArguments
// Here the hole is set as default value.
// It cannot escape into js as it's removed in Call below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
HeapObject* the_hole = ReadOnlyRoots(isolate).the_hole_value();
values[T::kReturnValueDefaultValueIndex] = the_hole;
values[T::kReturnValueIndex] = the_hole;
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}
@ -200,9 +200,9 @@ class FunctionCallbackArguments
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as default value.
// It cannot escape into js as it's removed in Call below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
HeapObject* the_hole = ReadOnlyRoots(isolate).the_hole_value();
values[T::kReturnValueDefaultValueIndex] = the_hole;
values[T::kReturnValueIndex] = the_hole;
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}

View File

@ -122,18 +122,18 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
void DisableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
Handle<Map> old_map(object->map());
Handle<Map> old_map(object->map(), isolate);
// Copy map so it won't interfere with the constructor's initial map.
Handle<Map> new_map = Map::Copy(old_map, "DisableAccessChecks");
Handle<Map> new_map = Map::Copy(isolate, old_map, "DisableAccessChecks");
new_map->set_is_access_check_needed(false);
JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
}
void EnableAccessChecks(Isolate* isolate, Handle<JSObject> object) {
Handle<Map> old_map(object->map());
Handle<Map> old_map(object->map(), isolate);
// Copy map so it won't interfere with the constructor's initial map.
Handle<Map> new_map = Map::Copy(old_map, "EnableAccessChecks");
Handle<Map> new_map = Map::Copy(isolate, old_map, "EnableAccessChecks");
new_map->set_is_access_check_needed(true);
new_map->set_may_have_interesting_symbols(true);
JSObject::MigrateToMap(object, new_map);
@ -202,19 +202,20 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
Handle<FixedArray> array =
isolate->factory()->NewFixedArray(max_number_of_properties);
for (Handle<TemplateInfoT> temp(*data); *temp != nullptr;
for (Handle<TemplateInfoT> temp(*data, isolate); *temp != nullptr;
temp = handle(temp->GetParent(isolate), isolate)) {
// Accumulate accessors.
Object* maybe_properties = temp->property_accessors();
if (!maybe_properties->IsUndefined(isolate)) {
valid_descriptors = AccessorInfo::AppendUnique(
handle(maybe_properties, isolate), array, valid_descriptors);
isolate, handle(maybe_properties, isolate), array,
valid_descriptors);
}
}
// Install accumulated accessors.
for (int i = 0; i < valid_descriptors; i++) {
Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)), isolate);
Handle<Name> name(Name::cast(accessor->name()), isolate);
JSObject::SetAccessor(obj, name, accessor,
accessor->initial_property_attributes())
@ -290,7 +291,7 @@ MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<SimpleNumberDictionary> slow_cache =
isolate->slow_template_instantiations_cache();
int entry = slow_cache->FindEntry(serial_number);
int entry = slow_cache->FindEntry(isolate, serial_number);
if (entry == SimpleNumberDictionary::kNotFound) {
return MaybeHandle<JSObject>();
}
@ -308,7 +309,7 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
Handle<FixedArray> fast_cache =
isolate->fast_template_instantiations_cache();
Handle<FixedArray> new_cache =
FixedArray::SetAndGrow(fast_cache, serial_number - 1, object);
FixedArray::SetAndGrow(isolate, fast_cache, serial_number - 1, object);
if (*new_cache != *fast_cache) {
isolate->native_context()->set_fast_template_instantiations_cache(
*new_cache);
@ -318,7 +319,8 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<SimpleNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
auto new_cache = SimpleNumberDictionary::Set(cache, serial_number, object);
auto new_cache =
SimpleNumberDictionary::Set(isolate, cache, serial_number, object);
if (*new_cache != *cache) {
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
@ -339,9 +341,9 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<SimpleNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
int entry = cache->FindEntry(isolate, serial_number);
DCHECK_NE(SimpleNumberDictionary::kNotFound, entry);
cache = SimpleNumberDictionary::DeleteEntry(cache, entry);
cache = SimpleNumberDictionary::DeleteEntry(isolate, cache, entry);
isolate->native_context()->set_slow_template_instantiations_cache(*cache);
}
}
@ -443,7 +445,7 @@ MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
// TODO(cbruni): decide what to do here.
ASSIGN_RETURN_ON_EXCEPTION(
isolate, instance_prototype,
JSObject::GetProperty(parent_instance,
JSObject::GetProperty(isolate, parent_instance,
isolate->factory()->prototype_string()),
JSFunction);
return scope.CloseAndEscape(instance_prototype);
@ -544,8 +546,8 @@ MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
}
MaybeHandle<JSObject> ApiNatives::InstantiateObject(
Handle<ObjectTemplateInfo> data, Handle<JSReceiver> new_target) {
Isolate* isolate = data->GetIsolate();
Isolate* isolate, Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target) {
InvokeScope invoke_scope(isolate);
return ::v8::internal::InstantiateObject(isolate, data, new_target, false,
false);
@ -557,7 +559,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
InvokeScope invoke_scope(isolate);
Handle<FunctionTemplateInfo> constructor(
FunctionTemplateInfo::cast(data->constructor()));
FunctionTemplateInfo::cast(data->constructor()), isolate);
Handle<Map> object_map = isolate->factory()->NewMap(
JS_SPECIAL_API_OBJECT_TYPE,
JSObject::kHeaderSize + data->embedder_field_count() * kPointerSize,
@ -654,7 +656,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
if (prototype->IsTheHole(isolate)) {
prototype = isolate->factory()->NewFunctionPrototype(result);
} else if (obj->prototype_provider_template()->IsUndefined(isolate)) {
JSObject::AddProperty(Handle<JSObject>::cast(prototype),
JSObject::AddProperty(isolate, Handle<JSObject>::cast(prototype),
isolate->factory()->constructor_string(), result,
DONT_ENUM);
}
@ -663,7 +665,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
bool immutable_proto = false;
if (!obj->instance_template()->IsUndefined(isolate)) {
Handle<ObjectTemplateInfo> instance_template = Handle<ObjectTemplateInfo>(
ObjectTemplateInfo::cast(obj->instance_template()));
ObjectTemplateInfo::cast(obj->instance_template()), isolate);
embedder_field_count = instance_template->embedder_field_count();
immutable_proto = instance_template->immutable_proto();
}

View File

@ -26,7 +26,7 @@ class ApiNatives {
MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> InstantiateObject(
Handle<ObjectTemplateInfo> data,
Isolate* isolate, Handle<ObjectTemplateInfo> data,
Handle<JSReceiver> new_target = Handle<JSReceiver>());
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> InstantiateRemoteObject(

907
deps/v8/src/api.cc vendored

File diff suppressed because it is too large

12
deps/v8/src/api.h vendored
View File

@ -11,7 +11,11 @@
#include "src/detachable-vector.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects/bigint.h"
#include "src/objects/js-collection.h"
#include "src/objects/js-promise.h"
#include "src/objects/module.h"
#include "src/objects/templates.h"
namespace v8 {
@ -248,9 +252,7 @@ OPEN_HANDLE_LIST(DECLARE_OPEN_HANDLE)
template<class From, class To>
static inline Local<To> Convert(v8::internal::Handle<From> obj) {
DCHECK(obj.is_null() ||
(obj->IsSmi() ||
!obj->IsTheHole(i::HeapObject::cast(*obj)->GetIsolate())));
DCHECK(obj.is_null() || (obj->IsSmi() || !obj->IsTheHole()));
return Local<To>(reinterpret_cast<To*>(obj.location()));
}
@ -635,7 +637,7 @@ bool HandleScopeImplementer::LastEnteredContextWas(Handle<Context> context) {
Handle<Context> HandleScopeImplementer::LastEnteredContext() {
if (entered_contexts_.empty()) return Handle<Context>::null();
return Handle<Context>(entered_contexts_.back());
return Handle<Context>(entered_contexts_.back(), isolate_);
}
void HandleScopeImplementer::EnterMicrotaskContext(Handle<Context> context) {
@ -650,7 +652,7 @@ void HandleScopeImplementer::LeaveMicrotaskContext() {
}
Handle<Context> HandleScopeImplementer::MicrotaskContext() {
if (microtask_context_) return Handle<Context>(microtask_context_);
if (microtask_context_) return Handle<Context>(microtask_context_, isolate_);
return Handle<Context>::null();
}

View File

@ -81,7 +81,7 @@ double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
// TODO(cbruni): add global flag to check whether any tracing events have been
// enabled.
#define RUNTIME_FUNCTION_RETURNS_TYPE(Type, Name) \
static INLINE(Type __RT_impl_##Name(Arguments args, Isolate* isolate)); \
static V8_INLINE Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
\
V8_NOINLINE static Type Stats_##Name(int args_length, Object** args_object, \
Isolate* isolate) { \

View File

@ -60,14 +60,17 @@ void RelocInfo::apply(intptr_t delta) {
// absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p += delta; // relocate entry
} else if (RelocInfo::IsRelativeCodeTarget(rmode_)) {
Instruction* branch = Instruction::At(pc_);
int32_t branch_offset = branch->GetBranchOffset() + delta;
branch->SetBranchOffset(branch_offset);
}
// We do not use pc relative addressing on ARM, so there is
// nothing else to do.
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
@ -101,12 +104,15 @@ HeapObject* RelocInfo::target_object() {
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
Assembler::target_address_at(pc_, constant_pool_)));
if (IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT) {
return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
Assembler::target_address_at(pc_, constant_pool_)));
}
DCHECK(IsRelativeCodeTarget(rmode_));
return origin->relative_code_target_object_handle_at(pc_);
}
void RelocInfo::set_target_object(HeapObject* target,
void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@ -114,9 +120,8 @@ void RelocInfo::set_target_object(HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
heap->RecordWriteIntoCode(host(), this, target);
}
}
@ -144,13 +149,6 @@ Address RelocInfo::target_internal_reference_address() {
return pc_;
}
void RelocInfo::set_wasm_code_table_entry(Address target,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
@ -180,12 +178,19 @@ void RelocInfo::WipeOut() {
}
}
Handle<Code> Assembler::relative_code_target_object_handle_at(
Address pc) const {
Instruction* branch = Instruction::At(pc);
int code_target_index = branch->GetBranchOffset() / Instruction::kInstrSize;
return GetCodeTarget(code_target_index);
}
template <typename ObjectVisitor>
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
} else if (RelocInfo::IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);
@ -317,7 +322,7 @@ Address Assembler::constant_pool_entry_address(Address pc,
Address constant_pool) {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Instr instr = Memory::int32_at(pc);
return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
return pc + GetLdrRegisterImmediateOffset(instr) + Instruction::kPcLoadDelta;
}
@ -325,7 +330,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
} else if (CpuFeatures::IsSupported(ARMv7)) {
} else if (CpuFeatures::IsSupported(ARMv7) && IsMovW(Memory::int32_at(pc))) {
// This is an movw / movt immediate load. Return the immediate.
DCHECK(IsMovW(Memory::int32_at(pc)) &&
IsMovT(Memory::int32_at(pc + kInstrSize)));
@ -333,7 +338,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
Instruction* movt_instr = Instruction::At(pc + kInstrSize);
return static_cast<Address>((movt_instr->ImmedMovwMovtValue() << 16) |
movw_instr->ImmedMovwMovtValue());
} else {
} else if (IsMovImmed(Memory::int32_at(pc))) {
// This is an mov / orr immediate load. Return the immediate.
DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
@ -347,6 +352,10 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3));
return ret;
} else {
Instruction* branch = Instruction::At(pc);
int32_t delta = branch->GetBranchOffset();
return pc + delta + Instruction::kPcLoadDelta;
}
}
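The movw/movt branch above reassembles a 32-bit value as (movt << 16) | movw; Move32BitImmediate in a later hunk emits it the same way, as imm32 & 0xFFFF and imm32 >> 16. A minimal standalone sketch of that split and round trip, assuming only that each instruction carries a 16-bit immediate:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t imm32 = 0xDEADBEEFu;
  uint16_t movw_imm = imm32 & 0xFFFF;  // low half, loaded by movw
  uint16_t movt_imm = imm32 >> 16;     // high half, loaded by movt
  uint32_t reassembled = (static_cast<uint32_t>(movt_imm) << 16) | movw_imm;
  std::printf("0x%08X -> movw 0x%04X, movt 0x%04X -> 0x%08X\n", imm32,
              movw_imm, movt_imm, reassembled);
  return 0;
}
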
@ -364,7 +373,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
// ldr ip, [pp, #...]
// since the instruction accessing this address in the constant pool remains
// unchanged.
} else if (CpuFeatures::IsSupported(ARMv7)) {
} else if (CpuFeatures::IsSupported(ARMv7) && IsMovW(Memory::int32_at(pc))) {
// This is an movw / movt immediate load. Patch the immediate embedded in
// the instructions.
DCHECK(IsMovW(Memory::int32_at(pc)));
@ -378,7 +387,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc, 2 * kInstrSize);
}
} else {
} else if (IsMovImmed(Memory::int32_at(pc))) {
// This is an mov / orr immediate load. Patch the immediate embedded in
// the instructions.
DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
@ -386,7 +395,7 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
uint32_t immediate = reinterpret_cast<uint32_t>(target);
uint32_t immediate = static_cast<uint32_t>(target);
instr_ptr[0] = PatchShiftImm(instr_ptr[0], immediate & kImm8Mask);
instr_ptr[1] = PatchShiftImm(instr_ptr[1], immediate & (kImm8Mask << 8));
instr_ptr[2] = PatchShiftImm(instr_ptr[2], immediate & (kImm8Mask << 16));
@ -398,6 +407,13 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc, 4 * kInstrSize);
}
} else {
intptr_t branch_offset = target - pc - Instruction::kPcLoadDelta;
Instruction* branch = Instruction::At(pc);
branch->SetBranchOffset(branch_offset);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(pc, kInstrSize);
}
}
}

View File

@ -43,6 +43,7 @@
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
@ -323,8 +324,8 @@ void CpuFeatures::PrintFeatures() {
// Implementation of RelocInfo
// static
const int RelocInfo::kApplyMask = 0;
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded.  Being
@ -333,39 +334,31 @@ bool RelocInfo::IsCodedSpecially() {
return false;
}
bool RelocInfo::IsInConstantPool() {
return Assembler::is_constant_pool_load(pc_);
}
Address RelocInfo::embedded_address() const {
return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::embedded_size() const {
return reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_, constant_pool_));
}
void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(size), flush_mode);
int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
DCHECK(IsRuntimeEntry(rmode_));
return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
set_embedded_address(address, icache_flush_mode);
Assembler::set_target_address_at(pc_, constant_pool_, address,
icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
return embedded_address();
return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
return static_cast<uint32_t>(
Assembler::target_address_at(pc_, constant_pool_));
}
// -----------------------------------------------------------------------------
@ -483,8 +476,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Handle<HeapObject> object;
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber:
object = isolate->factory()->NewHeapNumber(request.heap_number(),
IMMUTABLE, TENURED);
object =
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
break;
case HeapObjectRequest::kCodeStub:
request.code_stub()->set_isolate(isolate);
@ -544,8 +537,9 @@ const Instr kLdrRegFpNegOffsetPattern =
const Instr kStrRegFpNegOffsetPattern = al | B26 | NegOffset | fp.code() * B16;
const Instr kLdrStrInstrTypeMask = 0xFFFF0000;
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
Assembler::Assembler(const AssemblerOptions& options, void* buffer,
int buffer_size)
: AssemblerBase(options, buffer, buffer_size),
pending_32_bit_constants_(),
pending_64_bit_constants_(),
scratch_register_list_(ip.bit()) {
@ -553,7 +547,6 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
pending_64_bit_constants_.reserve(kMinNumPendingConstants);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
next_buffer_check_ = 0;
code_target_sharing_blocked_nesting_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
first_const_pool_32_use_ = -1;
@ -576,7 +569,6 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
Assembler::~Assembler() {
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
}
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
@ -620,20 +612,6 @@ Condition Assembler::GetCondition(Instr instr) {
return Instruction::ConditionField(instr);
}
bool Assembler::IsBranch(Instr instr) {
return (instr & (B27 | B25)) == (B27 | B25);
}
int Assembler::GetBranchOffset(Instr instr) {
DCHECK(IsBranch(instr));
// Take the jump offset in the lower 24 bits, sign-extend it, and multiply it
// by 4 to get the offset in bytes.
return ((instr & kImm24Mask) << 8) >> 6;
}
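The body of the deleted GetBranchOffset relies on a shift trick: moving the 24-bit immediate to the top of the word and then arithmetic-shifting right by 6 both sign-extends it and multiplies it by 4. A standalone sketch of the same decode (kImm24Mask redefined locally for illustration):

#include <cstdint>
#include <cstdio>

constexpr uint32_t kImm24Mask = (1u << 24) - 1;  // low 24 bits of the instruction

// Returns the branch offset in bytes: sign-extend the 24-bit immediate and
// scale it by the 4-byte instruction size, in one shift pair.
int32_t DecodeBranchOffset(uint32_t instr) {
  return static_cast<int32_t>((instr & kImm24Mask) << 8) >> 6;
}

int main() {
  // imm24 = -2 encodes a branch 8 bytes backwards from the pc read point.
  uint32_t instr = 0xEA000000u | (static_cast<uint32_t>(-2) & kImm24Mask);
  std::printf("offset = %d bytes\n", DecodeBranchOffset(instr));  // prints -8
  return 0;
}
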
bool Assembler::IsLdrRegisterImmediate(Instr instr) {
return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
}
@ -851,7 +829,7 @@ int Assembler::target_at(int pos) {
// blx uses bit 24 to encode bit 2 of imm26
imm26 += 2;
}
return pos + kPcLoadDelta + imm26;
return pos + Instruction::kPcLoadDelta + imm26;
}
@ -890,7 +868,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (is_uint8(target24)) {
// If the target fits in a byte then only patch with a mov
// instruction.
PatchingAssembler patcher(isolate_data(),
PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 1);
patcher.mov(dst, Operand(target24));
} else {
@ -899,12 +877,12 @@ void Assembler::target_at_put(int pos, int target_pos) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Patch with movw/movt.
if (target16_1 == 0) {
PatchingAssembler patcher(isolate_data(),
PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 1);
CpuFeatureScope scope(&patcher, ARMv7);
patcher.movw(dst, target16_0);
} else {
PatchingAssembler patcher(isolate_data(),
PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 2);
CpuFeatureScope scope(&patcher, ARMv7);
patcher.movw(dst, target16_0);
@ -916,12 +894,12 @@ void Assembler::target_at_put(int pos, int target_pos) {
uint8_t target8_1 = target16_0 >> 8;
uint8_t target8_2 = target16_1 & kImm8Mask;
if (target8_2 == 0) {
PatchingAssembler patcher(isolate_data(),
PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 2);
patcher.mov(dst, Operand(target8_0));
patcher.orr(dst, dst, Operand(target8_1 << 8));
} else {
PatchingAssembler patcher(isolate_data(),
PatchingAssembler patcher(options(),
reinterpret_cast<byte*>(buffer_ + pos), 3);
patcher.mov(dst, Operand(target8_0));
patcher.orr(dst, dst, Operand(target8_1 << 8));
@ -931,7 +909,7 @@ void Assembler::target_at_put(int pos, int target_pos) {
}
return;
}
int imm26 = target_pos - (pos + kPcLoadDelta);
int imm26 = target_pos - (pos + Instruction::kPcLoadDelta);
DCHECK_EQ(5 * B25, instr & 7 * B25); // b, bl, or blx imm24
if (Instruction::ConditionField(instr) == kSpecialCondition) {
// blx uses bit 24 to encode bit 2 of imm26
@ -1105,9 +1083,9 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
// space. There is no guarantee that the relocated location can be similarly
// encoded.
bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
if (assembler != nullptr && assembler->predictable_code_size()) return true;
return assembler->serializer_enabled();
if (RelocInfo::IsOnlyForSerializer(rmode)) {
if (assembler->predictable_code_size()) return true;
return assembler->options().record_reloc_info_for_serialization;
} else if (RelocInfo::IsNone(rmode)) {
return false;
}
@ -1167,6 +1145,7 @@ int Operand::InstructionsRequired(const Assembler* assembler,
void Assembler::Move32BitImmediate(Register rd, const Operand& x,
Condition cond) {
if (UseMovImmediateLoad(x, this)) {
CpuFeatureScope scope(this, ARMv7);
// UseMovImmediateLoad should return false when we need to output
// relocation info, since we prefer the constant pool for values that
// can be patched.
@ -1174,12 +1153,9 @@ void Assembler::Move32BitImmediate(Register rd, const Operand& x,
UseScratchRegisterScope temps(this);
// Re-use the destination register as a scratch if possible.
Register target = rd != pc ? rd : temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
CpuFeatureScope scope(this, ARMv7);
movw(target, imm32 & 0xFFFF, cond);
movt(target, imm32 >> 16, cond);
}
uint32_t imm32 = static_cast<uint32_t>(x.immediate());
movw(target, imm32 & 0xFFFF, cond);
movt(target, imm32 >> 16, cond);
if (target.code() != rd.code()) {
mov(rd, target, LeaveCC, cond);
}
@ -1436,15 +1412,17 @@ int Assembler::branch_offset(Label* L) {
// be emitted at the pc offset recorded by the label.
if (!is_const_pool_blocked()) BlockConstPoolFor(1);
return target_pos - (pc_offset() + kPcLoadDelta);
return target_pos - (pc_offset() + Instruction::kPcLoadDelta);
}
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
const bool b_imm_check = is_int24(imm24);
CHECK(b_imm_check);
emit(cond | B27 | B25 | (imm24 & kImm24Mask));
if (cond == al) {
@ -1453,11 +1431,12 @@ void Assembler::b(int branch_offset, Condition cond) {
}
}
void Assembler::bl(int branch_offset, Condition cond) {
void Assembler::bl(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
const bool bl_imm_check = is_int24(imm24);
CHECK(bl_imm_check);
emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
@ -1465,7 +1444,8 @@ void Assembler::blx(int branch_offset) {
DCHECK_EQ(branch_offset & 1, 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
const bool blx_imm_check = is_int24(imm24);
CHECK(blx_imm_check);
emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
@ -5145,10 +5125,11 @@ void Assembler::dq(uint64_t value) {
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (options().disable_reloc_info_for_patching) return;
if (RelocInfo::IsNone(rmode) ||
// Don't record external references unless the heap will be serialized.
(rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
!emit_debug_code())) {
(RelocInfo::IsOnlyForSerializer(rmode) &&
!options().record_reloc_info_for_serialization && !emit_debug_code())) {
return;
}
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
@ -5159,16 +5140,14 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value) {
DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
(rmode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
bool sharing_ok =
RelocInfo::IsNone(rmode) || RelocInfo::IsShareableRelocMode(rmode);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
}
ConstantPoolEntry entry(position, value,
sharing_ok || (rmode == RelocInfo::CODE_TARGET &&
IsCodeTargetSharingAllowed()),
rmode);
ConstantPoolEntry entry(
position, value, sharing_ok || (rmode == RelocInfo::CODE_TARGET), rmode);
bool shared = false;
if (sharing_ok) {
@ -5187,8 +5166,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
// Share entries if allowed and possible.
// Null-values are placeholders and must be ignored.
if (rmode == RelocInfo::CODE_TARGET && IsCodeTargetSharingAllowed() &&
value != 0) {
if (rmode == RelocInfo::CODE_TARGET && value != 0) {
// Sharing entries here relies on canonicalized handles - without them, we
// will miss the optimisation opportunity.
Address handle_address = static_cast<Address>(value);
@ -5384,7 +5362,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
DCHECK((IsVldrDPcImmediateOffset(instr) &&
GetVldrDRegisterImmediateOffset(instr) == 0));
int delta = pc_offset() - entry.position() - kPcLoadDelta;
int delta = pc_offset() - entry.position() - Instruction::kPcLoadDelta;
DCHECK(is_uint10(delta));
if (entry.is_merged()) {
@ -5415,7 +5393,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
DCHECK(IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0);
int delta = pc_offset() - entry.position() - kPcLoadDelta;
int delta = pc_offset() - entry.position() - Instruction::kPcLoadDelta;
DCHECK(is_uint12(delta));
// 0 is the smallest delta:
// ldr rd, [pc, #0]
@ -5460,9 +5438,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
PatchingAssembler::PatchingAssembler(IsolateData isolate_data, byte* address,
int instructions)
: Assembler(isolate_data, address, instructions * kInstrSize + kGap) {
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
byte* address, int instructions)
: Assembler(options, address, instructions * kInstrSize + kGap) {
DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}

View File

@ -397,26 +397,26 @@ enum Coprocessor {
class Operand BASE_EMBEDDED {
public:
// immediate
INLINE(explicit Operand(int32_t immediate,
RelocInfo::Mode rmode = RelocInfo::NONE));
INLINE(static Operand Zero());
INLINE(explicit Operand(const ExternalReference& f));
V8_INLINE explicit Operand(int32_t immediate,
RelocInfo::Mode rmode = RelocInfo::NONE);
V8_INLINE static Operand Zero();
V8_INLINE explicit Operand(const ExternalReference& f);
explicit Operand(Handle<HeapObject> handle);
INLINE(explicit Operand(Smi* value));
V8_INLINE explicit Operand(Smi* value);
// rm
INLINE(explicit Operand(Register rm));
V8_INLINE explicit Operand(Register rm);
// rm <shift_op> shift_imm
explicit Operand(Register rm, ShiftOp shift_op, int shift_imm);
INLINE(static Operand SmiUntag(Register rm)) {
V8_INLINE static Operand SmiUntag(Register rm) {
return Operand(rm, ASR, kSmiTagSize);
}
INLINE(static Operand PointerOffsetFromSmiKey(Register key)) {
V8_INLINE static Operand PointerOffsetFromSmiKey(Register key) {
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
return Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize);
}
INLINE(static Operand DoubleOffsetFromSmiKey(Register key)) {
V8_INLINE static Operand DoubleOffsetFromSmiKey(Register key) {
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kDoubleSizeLog2);
return Operand(key, LSL, kDoubleSizeLog2 - kSmiTagSize);
}
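These helpers fold Smi untagging into the operand's shift: untagging is an arithmetic shift right by kSmiTagSize, and indexing a pointer-sized (or double-sized) array by a Smi key needs only a single left shift by the difference of the two log-sizes. A standalone sketch of that arithmetic, assuming the 32-bit layout used here (kSmiTag == 0, kSmiTagSize == 1, kPointerSizeLog2 == 2):

#include <cstdint>
#include <cstdio>

constexpr int kSmiTagSize = 1;       // assumption: 31-bit Smis, tag bit is 0
constexpr int kPointerSizeLog2 = 2;  // assumption: 32-bit pointers

int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }  // ASR, as in SmiUntag above

int main() {
  int32_t key = SmiTag(3);
  // PointerOffsetFromSmiKey: (key >> 1) << 2 collapses to key << (2 - 1).
  int32_t offset = key << (kPointerSizeLog2 - kSmiTagSize);
  std::printf("untagged=%d byte offset=%d\n", SmiUntag(key), offset);  // 3, 12
  return 0;
}
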
@ -519,9 +519,9 @@ class MemOperand BASE_EMBEDDED {
// [rn], +/- rm <shift_op> shift_imm PostIndex/NegPostIndex
explicit MemOperand(Register rn, Register rm,
ShiftOp shift_op, int shift_imm, AddrMode am = Offset);
INLINE(static MemOperand PointerAddressFromSmiKey(Register array,
Register key,
AddrMode am = Offset)) {
V8_INLINE static MemOperand PointerAddressFromSmiKey(Register array,
Register key,
AddrMode am = Offset) {
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
return MemOperand(array, key, LSL, kPointerSizeLog2 - kSmiTagSize, am);
}
@ -628,9 +628,7 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~Assembler();
// GetCode emits any pending (non-emitted) code and fills the descriptor
@ -662,27 +660,27 @@ class Assembler : public AssemblerBase {
// Returns true if the given pc address is the start of a constant pool load
// instruction sequence.
INLINE(static bool is_constant_pool_load(Address pc));
V8_INLINE static bool is_constant_pool_load(Address pc);
// Return the address in the constant pool of the code target address used by
// the branch/call instruction at pc, or the object in a mov.
INLINE(static Address constant_pool_entry_address(Address pc,
Address constant_pool));
V8_INLINE static Address constant_pool_entry_address(Address pc,
Address constant_pool);
// Read/Modify the code target address in the branch/call instruction at pc.
// The isolate argument is unused (and may be nullptr) when skipping flushing.
INLINE(static Address target_address_at(Address pc, Address constant_pool));
INLINE(static void set_target_address_at(
V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
V8_INLINE static void set_target_address_at(
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
INLINE(static Address target_address_from_return_address(Address pc));
V8_INLINE static Address target_address_from_return_address(Address pc);
// Given the address of the beginning of a call, return the address
// in the instruction stream that the call will return from.
INLINE(static Address return_address_from_call_start(Address pc));
V8_INLINE static Address return_address_from_call_start(Address pc);
// This sets the branch destination (which is in the constant pool on ARM).
// This is for calls and branches within generated code.
@ -705,9 +703,6 @@ class Assembler : public AssemblerBase {
// Size of an instruction.
static constexpr int kInstrSize = sizeof(Instr);
// Difference between address of current opcode and value read from pc
// register.
static constexpr int kPcLoadDelta = 8;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
VfpRegList* GetScratchVfpRegisterList() {
return &scratch_vfp_register_list_;
@ -727,8 +722,10 @@ class Assembler : public AssemblerBase {
void CodeTargetAlign();
// Branch instructions
void b(int branch_offset, Condition cond = al);
void bl(int branch_offset, Condition cond = al);
void b(int branch_offset, Condition cond = al,
RelocInfo::Mode rmode = RelocInfo::NONE);
void bl(int branch_offset, Condition cond = al,
RelocInfo::Mode rmode = RelocInfo::NONE);
void blx(int branch_offset); // v5 and above
void blx(Register target, Condition cond = al); // v5 and above
void bx(Register target, Condition cond = al); // v5 and above, plus v4t
@ -1427,36 +1424,6 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
// Class for blocking sharing of code targets in constant pool.
class BlockCodeTargetSharingScope {
public:
explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) {
Open(assem);
}
// This constructor does not initialize the scope. The user needs to
// explicitly call Open() before using it.
BlockCodeTargetSharingScope() : assem_(nullptr) {}
~BlockCodeTargetSharingScope() {
Close();
}
void Open(Assembler* assem) {
DCHECK_NULL(assem_);
DCHECK_NOT_NULL(assem);
assem_ = assem;
assem_->StartBlockCodeTargetSharing();
}
private:
void Close() {
if (assem_ != nullptr) {
assem_->EndBlockCodeTargetSharing();
}
}
Assembler* assem_;
DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope);
};
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@ -1504,8 +1471,6 @@ class Assembler : public AssemblerBase {
*reinterpret_cast<Instr*>(pc) = instr;
}
static Condition GetCondition(Instr instr);
static bool IsBranch(Instr instr);
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
@ -1579,6 +1544,13 @@ class Assembler : public AssemblerBase {
UNREACHABLE();
}
// Move a 32-bit immediate into a register, potentially via the constant pool.
void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
// Get the code target object for a pc-relative call or jump.
V8_INLINE Handle<Code> relative_code_target_object_handle_at(
Address pc_) const;
protected:
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
@ -1588,20 +1560,6 @@ class Assembler : public AssemblerBase {
// Patch branch instruction at pos to branch to given branch target pos
void target_at_put(int pos, int target_pos);
// Prevent sharing of code target constant pool entries until
// EndBlockCodeTargetSharing is called. Calls to this function can be nested
// but must be followed by an equal number of calls to
// EndBlockCodeTargetSharing.
void StartBlockCodeTargetSharing() {
++code_target_sharing_blocked_nesting_;
}
// Resume sharing of constant pool code target entries. Needs to be called
// as many times as StartBlockCodeTargetSharing to have an effect.
void EndBlockCodeTargetSharing() {
--code_target_sharing_blocked_nesting_;
}
// Prevent constant pool emission until EndBlockConstPool is called.
// Calls to this function can be nested but must be followed by an equal
// number of calls to EndBlockConstPool.
@ -1709,12 +1667,6 @@ class Assembler : public AssemblerBase {
static constexpr int kCheckPoolIntervalInst = 32;
static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
// Sharing of code target entries may be blocked in some code sequences.
int code_target_sharing_blocked_nesting_;
bool IsCodeTargetSharingAllowed() const {
return code_target_sharing_blocked_nesting_ == 0;
}
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
@ -1730,9 +1682,6 @@ class Assembler : public AssemblerBase {
inline void CheckBuffer();
void GrowBuffer();
// 32-bit immediate values
void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
// Instruction generation
void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
// Attempt to encode operand |x| for instruction |instr| and return true on
@ -1757,35 +1706,23 @@ class Assembler : public AssemblerBase {
void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value);
void ConstantPoolAddEntry(int position, Double value);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
friend class RelocInfo;
friend class BlockConstPoolScope;
friend class BlockCodeTargetSharingScope;
friend class EnsureSpace;
friend class UseScratchRegisterScope;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
// code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
};
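The comment above describes a deferred-patching scheme: while assembling, only a dummy value and the pc offset of the request are recorded, and the real heap object is installed once allocation is allowed again. A minimal stand-alone sketch of that pattern, using made-up types rather than the actual HeapObjectRequest machinery:
#include <cstddef>
#include <cstdint>
#include <forward_list>
#include <vector>
// Illustrative only: record where a placeholder was emitted and what should
// eventually replace it, then patch everything in a second pass.
struct SketchRequest {
  size_t pc_offset;  // position of the dummy slot in the buffer
  uint64_t payload;  // value to install once it may be allocated/created
};
class SketchAssembler {
 public:
  void EmitPlaceholder(uint64_t payload) {
    requests_.push_front({buffer_.size(), payload});
    buffer_.push_back(0);  // dummy, patched later
  }
  void InstallRequested() {
    for (const SketchRequest& r : requests_) buffer_[r.pc_offset] = r.payload;
    requests_.clear();
  }
 private:
  std::vector<uint64_t> buffer_;
  std::forward_list<SketchRequest> requests_;
};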
class EnsureSpace BASE_EMBEDDED {
public:
INLINE(explicit EnsureSpace(Assembler* assembler));
V8_INLINE explicit EnsureSpace(Assembler* assembler);
};
class PatchingAssembler : public Assembler {
public:
PatchingAssembler(IsolateData isolate_data, byte* address, int instructions);
PatchingAssembler(const AssemblerOptions& options, byte* address,
int instructions);
~PatchingAssembler();
void Emit(Address addr);

View File

@ -30,20 +30,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ lsl(r5, r0, Operand(kPointerSizeLog2));
__ str(r1, MemOperand(sp, r5));
__ Push(r1);
__ Push(r2);
__ add(r0, r0, Operand(3));
__ TailCallRuntime(Runtime::kNewArray);
}
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
void JSEntryStub::Generate(MacroAssembler* masm) {
// r0: code entry
// r1: function
@ -216,6 +202,18 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
if (FLAG_embedded_builtins) {
if (masm->root_array_available() &&
isolate()->ShouldLoadConstantsFromRootList()) {
// This is basically an inlined version of Call(Handle<Code>) that loads
// the code object into lr instead of ip.
__ Move(ip, target);
__ IndirectLoadConstant(lr, GetCode());
__ add(lr, lr, Operand(Code::kHeaderSize - kHeapObjectTag));
__ blx(lr);
return;
}
}
intptr_t code =
reinterpret_cast<intptr_t>(GetCode().location());
__ Move(ip, target);
@ -315,280 +313,6 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
__ ldm(ia_w, sp, kSavedRegs | pc.bit());
}
template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
if (mode == DISABLE_ALLOCATION_SITES) {
T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
T stub(masm->isolate(), kind);
__ TailCallStub(&stub, eq);
}
// If we reached this point there is a problem.
__ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
}
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
// r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// r3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// r0 - number of arguments
// r1 - constructor?
// sp[0] - last argument
STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(PACKED_ELEMENTS == 2);
STATIC_ASSERT(HOLEY_ELEMENTS == 3);
STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
} else if (mode == DONT_OVERRIDE) {
// is the low bit set? If so, we are holey and that is good.
Label normal_sequence;
__ tst(r3, Operand(1));
__ b(ne, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ add(r3, r3, Operand(1));
if (FLAG_debug_code) {
__ ldr(r5, FieldMemOperand(r2, 0));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store r3
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ ldr(r4, FieldMemOperand(
r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
__ str(r4, FieldMemOperand(
r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ bind(&normal_sequence);
int last_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
__ TailCallStub(&stub, eq);
}
// If we reached this point there is a problem.
__ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
}
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
int to_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
}
}
void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
}
}
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
Label not_zero_case, not_one_case;
__ tst(r0, r0);
__ b(ne, &not_zero_case);
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
__ bind(&not_zero_case);
__ cmp(r0, Operand(1));
__ b(gt, &not_one_case);
CreateArrayDispatchOneArgument(masm, mode);
__ bind(&not_one_case);
ArrayNArgumentsConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc (only if argument_count() == ANY)
// -- r1 : constructor
// -- r2 : AllocationSite or undefined
// -- r3 : new target
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
__ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ tst(r4, Operand(kSmiTagMask));
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r4, r4, r5, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
// We should either have undefined in r2 or a valid AllocationSite
__ AssertUndefinedOrAllocationSite(r2, r4);
}
// Enter the context of the Array function.
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Label subclassing;
__ cmp(r3, r1);
__ b(ne, &subclassing);
Label no_info;
// Get the elements kind and case on that.
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(
r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ SmiUntag(r3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
__ bind(&subclassing);
__ str(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ add(r0, r0, Operand(3));
__ Push(r3, r2);
__ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
}
void InternalArrayConstructorStub::GenerateCase(
MacroAssembler* masm, ElementsKind kind) {
__ cmp(r0, Operand(1));
InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0, lo);
ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN, hi);
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument
__ ldr(r3, MemOperand(sp, 0));
__ cmp(r3, Operand::Zero());
InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey, ne);
}
InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
}
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
// -- r1 : constructor
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into |result|. We only need the first byte,
// but the following bit field extraction takes care of that anyway.
__ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ DecodeField<Map::ElementsKindBits>(r3);
if (FLAG_debug_code) {
Label done;
__ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &done);
__ cmp(r3, Operand(HOLEY_ELEMENTS));
__ Assert(
eq,
AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
Label fast_elements_case;
__ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &fast_elements_case);
GenerateCase(masm, HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, PACKED_ELEMENTS);
}
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}
@ -617,13 +341,13 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Label profiler_disabled;
Label end_profiler_check;
__ mov(r9, Operand(ExternalReference::is_profiling_address(isolate)));
__ Move(r9, ExternalReference::is_profiling_address(isolate));
__ ldrb(r9, MemOperand(r9, 0));
__ cmp(r9, Operand(0));
__ b(eq, &profiler_disabled);
// Additional parameter is the address of the actual callback.
__ mov(r3, Operand(thunk_ref));
__ Move(r3, thunk_ref);
__ jmp(&end_profiler_check);
__ bind(&profiler_disabled);
@ -631,7 +355,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&end_profiler_check);
// Allocate HandleScope in callee-save registers.
__ mov(r9, Operand(next_address));
__ Move(r9, next_address);
__ ldr(r4, MemOperand(r9, kNextOffset));
__ ldr(r5, MemOperand(r9, kLimitOffset));
__ ldr(r6, MemOperand(r9, kLevelOffset));
@ -642,7 +366,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
__ PrepareCallCFunction(1);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
__ Move(r0, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::log_enter_external_function(), 1);
__ PopSafepointRegisters();
}
@ -657,7 +381,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
__ PrepareCallCFunction(1);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
__ Move(r0, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::log_leave_external_function(), 1);
__ PopSafepointRegisters();
}
@ -696,7 +420,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
__ mov(r6, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ Move(r6, ExternalReference::scheduled_exception_address(isolate));
__ ldr(r5, MemOperand(r6));
__ cmp(r4, r5);
__ b(ne, &promote_scheduled_exception);
@ -712,7 +436,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ str(r5, MemOperand(r9, kLimitOffset));
__ mov(r4, r0);
__ PrepareCallCFunction(1);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
__ Move(r0, ExternalReference::isolate_address(isolate));
__ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
__ mov(r0, r4);
__ jmp(&leave_exit_frame);
@ -759,8 +483,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// return value default
__ push(scratch0);
// isolate
__ mov(scratch1,
Operand(ExternalReference::isolate_address(masm->isolate())));
__ Move(scratch1, ExternalReference::isolate_address(masm->isolate()));
__ push(scratch1);
// holder
__ push(holder);
@ -829,7 +552,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ push(scratch);
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ Push(scratch, scratch);
__ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
__ Move(scratch, ExternalReference::isolate_address(isolate()));
__ Push(scratch, holder);
__ Push(Smi::kZero); // should_throw_on_error -> false
__ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));

View File

@ -11,6 +11,7 @@
#include "src/base/macros.h"
#include "src/boxed-float.h"
#include "src/globals.h"
#include "src/utils.h"
// ARM EABI is required.
#if defined(__arm__) && !defined(__ARM_EABI__)
@ -51,6 +52,12 @@ const int kNoRegister = -1;
const int kLdrMaxReachBits = 12;
const int kVldrMaxReachBits = 10;
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values. Loads allow a uint12
// value with a separate sign bit (range [-4095, +4095]), so the first root
// is still addressable with a single load instruction.
constexpr int kRootRegisterBias = 4095;
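A quick illustration of what the bias buys: a single ldr reaches [-4095, +4095] bytes around kRootRegister, so pointing the register 4095 bytes past the start of the roots array makes roughly twice as many roots reachable without a second instruction. A minimal sketch of the offset arithmetic, assuming 4-byte pointers and that RootRegisterOffset computes index * kPointerSize - kRootRegisterBias (the helper below is illustrative, not the V8 function):
constexpr int kSketchPointerSize = 4;        // ARM pointer size
constexpr int kSketchRootRegisterBias = 4095;
// Offset of root |index| relative to the biased root register.
constexpr int SketchRootRegisterOffset(int index) {
  return index * kSketchPointerSize - kSketchRootRegisterBias;
}
static_assert(SketchRootRegisterOffset(0) == -4095,
              "the first root is still reachable with a single ldr");
static_assert(SketchRootRegisterOffset(2047) == 4093,
              "about 2k roots fit within the +/-4095 ldr range");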
// -----------------------------------------------------------------------------
// Conditions.
@ -462,15 +469,19 @@ class Instruction {
kPCReadOffset = 8
};
// Helper macro to define static accessors.
// We use the cast to char* trick to bypass the strict anti-aliasing rules.
#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
static inline return_type Name(Instr instr) { \
char* temp = reinterpret_cast<char*>(&instr); \
return reinterpret_cast<Instruction*>(temp)->Name(); \
}
// Difference between address of current opcode and value read from pc
// register.
static constexpr int kPcLoadDelta = 8;
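On ARM, reading pc yields the address of the current instruction plus 8 (two instructions ahead), which is what kPcLoadDelta accounts for; ComputeCodeStartAddress later in this change relies on it via sub(dst, pc, Operand(pc_offset() + kPcLoadDelta)). A tiny sanity check of that arithmetic (illustrative only):
constexpr int kSketchPcLoadDelta = 8;  // mirrors kPcLoadDelta above
// If the sub is emitted |pc_offset| bytes into the code object, the value
// read from pc is code_start + pc_offset + 8, so subtracting
// pc_offset + kPcLoadDelta recovers code_start.
constexpr long RecoveredCodeStart(long code_start, int pc_offset) {
  return (code_start + pc_offset + kSketchPcLoadDelta) -
         (pc_offset + kSketchPcLoadDelta);
}
static_assert(RecoveredCodeStart(0x1000, 24) == 0x1000,
              "pc - (pc_offset + kPcLoadDelta) points at the code start");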
#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
// Helper macro to define static accessors.
// We use the cast to char* trick to bypass the strict anti-aliasing rules.
#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
static inline return_type Name(Instr instr) { \
char* temp = reinterpret_cast<char*>(&instr); \
return reinterpret_cast<Instruction*>(temp)->Name(); \
}
#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
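For readers unfamiliar with the macro, DECLARE_STATIC_ACCESSOR(LinkValue) — LinkValue() being one of the instance accessors defined below — would expand to roughly the following; the char* detour exists only to sidestep strict-aliasing when reinterpreting the raw Instr bits as an Instruction*:
// Hand-expanded sketch of DECLARE_STATIC_ACCESSOR(LinkValue):
static inline int LinkValue(Instr instr) {
  char* temp = reinterpret_cast<char*>(&instr);
  return reinterpret_cast<Instruction*>(temp)->LinkValue();
}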
// Get the raw instruction bits.
inline Instr InstructionBits() const {
@ -624,7 +635,25 @@ class Instruction {
// Fields used in Branch instructions
inline int LinkValue() const { return Bit(24); }
inline int SImmed24Value() const { return ((InstructionBits() << 8) >> 8); }
inline int SImmed24Value() const {
return signed_bitextract_32(23, 0, InstructionBits());
}
bool IsBranch() { return Bit(27) == 1 && Bit(25) == 1; }
int GetBranchOffset() {
DCHECK(IsBranch());
return SImmed24Value() * kInstrSize;
}
void SetBranchOffset(int32_t branch_offset) {
DCHECK(IsBranch());
DCHECK_EQ(branch_offset % kInstrSize, 0);
int32_t new_imm24 = branch_offset / kInstrSize;
CHECK(is_int24(new_imm24));
SetInstructionBits((InstructionBits() & ~(kImm24Mask)) |
(new_imm24 & kImm24Mask));
}
// Fields used in Software interrupt instructions
inline SoftwareInterruptCodes SvcValue() const {
@ -729,6 +758,8 @@ class VFPRegisters {
static const char* names_[kNumVFPRegisters];
};
// Relative jumps on ARM can address ±32 MB.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 32;
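The ±32 MB figure follows from the branch encoding above: the signed 24-bit immediate counts instructions and each instruction is 4 bytes, so the forward reach is (2^23 - 1) * 4 bytes, i.e. just 4 bytes short of 32 MiB. A quick check of that arithmetic:
constexpr int kSketchInstrBytes = 4;            // one ARM instruction
constexpr int kSketchMaxImm24 = (1 << 23) - 1;  // largest positive signed 24-bit value
static_assert(kSketchMaxImm24 * kSketchInstrBytes ==
                  32 * 1024 * 1024 - kSketchInstrBytes,
              "pc-relative branches reach just under +/-32 MB");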
} // namespace internal
} // namespace v8

View File

@ -86,7 +86,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ JumpIfSmi(r1, &context_check);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
__ mov(r1, Operand(type())); // bailout type,
__ mov(r1, Operand(static_cast<int>(deopt_kind())));
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.

View File

@ -5,6 +5,9 @@
#ifndef V8_ARM_FRAME_CONSTANTS_ARM_H_
#define V8_ARM_FRAME_CONSTANTS_ARM_H_
#include "src/base/macros.h"
#include "src/frame-constants.h"
namespace v8 {
namespace internal {
@ -30,6 +33,19 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kCallerSPDisplacement = 2 * kPointerSize;
};
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
public:
static constexpr int kNumberOfSavedGpParamRegs = 4;
static constexpr int kNumberOfSavedFpParamRegs = 8;
// FP-relative.
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(0);
static constexpr int kFixedFrameSizeFromFp =
TypedFrameConstants::kFixedFrameSizeFromFp +
kNumberOfSavedGpParamRegs * kPointerSize +
kNumberOfSavedFpParamRegs * kDoubleSize;
};
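As a sanity check on the new frame layout: with ARM's 4-byte pointers and 8-byte doubles, the saved parameter registers add 4 * 4 + 8 * 8 = 80 bytes on top of the ordinary typed-frame prologue (the TypedFrameConstants base term is not shown in this hunk). Illustrative arithmetic only:
constexpr int kWasmSketchPointerSize = 4;  // ARM
constexpr int kWasmSketchDoubleSize = 8;
constexpr int kWasmSketchSavedParamBytes =
    4 /* saved gp param regs */ * kWasmSketchPointerSize +
    8 /* saved fp param regs */ * kWasmSketchDoubleSize;
static_assert(kWasmSketchSavedParamBytes == 80,
              "WasmCompileLazy saves 80 bytes of parameter registers");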
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.

View File

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/arm/interface-descriptors-arm.h"
#if V8_TARGET_ARCH_ARM
#include "src/interface-descriptors.h"
@ -59,13 +57,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return r5; }
const Register ApiGetterDescriptor::HolderRegister() { return r0; }
const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
const Register MathPowTaggedDescriptor::exponent() { return r2; }
const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
@ -179,24 +170,7 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the target to call
// r3 : the new target
Register registers[] = {r1, r3, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0, r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void AbortJSDescriptor::InitializePlatformSpecific(
void AbortDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
@ -204,41 +178,7 @@ void AbortJSDescriptor::InitializePlatformSpecific(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {r1, r3, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r0 -- number of arguments
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// r0 -- number of arguments
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
data->InitializePlatformSpecific(0, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
@ -247,7 +187,6 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0};
@ -256,32 +195,24 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
r1, // JSFunction
r3, // the new target
r0, // actual number of arguments
r2, // expected number of arguments
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
JavaScriptFrame::context_register(), // callee context
r4, // call_data
r2, // holder
r1, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
@ -314,7 +245,9 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
namespace {
void InterpreterCEntryDescriptor_InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (argc)
@ -324,6 +257,18 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
} // namespace
void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
}
void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
}
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -1,26 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
#define V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
class PlatformInterfaceDescriptor {
public:
explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) {}
TargetAddressStorageMode storage_mode() { return storage_mode_; }
private:
TargetAddressStorageMode storage_mode_;
};
} // namespace internal
} // namespace v8
#endif // V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_

View File

@ -11,7 +11,6 @@
#include "src/base/division-by-constant.h"
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@ -24,16 +23,17 @@
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/arm/macro-assembler-arm.h"
namespace v8 {
namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, size, create_code_object) {
MacroAssembler::MacroAssembler(Isolate* isolate,
const AssemblerOptions& options, void* buffer,
int size, CodeObjectRequired create_code_object)
: TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@ -45,15 +45,6 @@ MacroAssembler::MacroAssembler(Isolate* isolate, void* buffer, int size,
}
}
TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, buffer_size), isolate_(isolate) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ = Handle<HeapObject>::New(
isolate->heap()->self_reference_marker(), isolate);
}
}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
@ -135,33 +126,19 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
return bytes;
}
#ifdef V8_EMBEDDED_BUILTINS
void TurboAssembler::LookupConstant(Register destination,
Handle<Object> object) {
CHECK(isolate()->ShouldLoadConstantsFromRootList());
CHECK(root_array_available_);
// Ensure the given object is in the builtins constants table and fetch its
// index.
BuiltinsConstantsTableBuilder* builder =
isolate()->builtins_constants_table_builder();
uint32_t index = builder->AddObject(object);
// TODO(jgruber): Load builtins from the builtins table.
// TODO(jgruber): Ensure that code generation can recognize constant targets
// in kArchCallCodeObject.
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
// The ldr call below could end up clobbering the destination register when
// the offset does not fit into 12 bits (and thus needs to be loaded from the
// constant pool). In that case, we need to be extra-careful and temporarily
// use another register as the target.
// The ldr call below could end up clobbering ip when the offset does not fit
// into 12 bits (and thus needs to be loaded from the constant pool). In that
// case, we need to be extra-careful and temporarily use another register as
// the target.
const uint32_t offset =
FixedArray::kHeaderSize + index * kPointerSize - kHeapObjectTag;
const bool could_clobber_ip = !is_uint12(offset) && destination == ip;
FixedArray::kHeaderSize + constant_index * kPointerSize - kHeapObjectTag;
const bool could_clobber_ip = !is_uint12(offset);
Register reg = destination;
if (could_clobber_ip) {
@ -178,30 +155,18 @@ void TurboAssembler::LookupConstant(Register destination,
}
}
void TurboAssembler::LookupExternalReference(Register destination,
ExternalReference reference) {
CHECK(reference.address() !=
ExternalReference::roots_array_start(isolate()).address());
CHECK(isolate()->ShouldLoadConstantsFromRootList());
CHECK(root_array_available_);
// Encode as an index into the external reference table stored on the isolate.
ExternalReferenceEncoder encoder(isolate());
ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
CHECK(!v.is_from_api());
uint32_t index = v.index();
// Generate code to load from the external reference table.
int32_t roots_to_external_reference_offset =
Heap::roots_to_external_reference_table_offset() +
ExternalReferenceTable::OffsetOfEntry(index);
ldr(destination,
MemOperand(kRootRegister, roots_to_external_reference_offset));
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
ldr(destination, MemOperand(kRootRegister, offset));
}
void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
Move(destination, kRootRegister);
} else {
add(destination, kRootRegister, Operand(offset));
}
}
#endif // V8_EMBEDDED_BUILTINS
void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); }
@ -220,16 +185,36 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LookupConstant(scratch, code);
add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
Jump(scratch, cond);
return;
if (FLAG_embedded_builtins) {
int builtin_index = Builtins::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
b(code_target_index * Instruction::kInstrSize, cond,
RelocInfo::RELATIVE_CODE_TARGET);
return;
} else if (root_array_available_ && options().isolate_independent_code) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
IndirectLoadConstant(scratch, code);
add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
Jump(scratch, cond);
return;
} else if (target_is_isolate_independent_builtin &&
options().inline_offheap_trampolines) {
// Inline the trampoline.
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(ip, cond);
return;
}
}
#endif // V8_EMBEDDED_BUILTINS
// 'code' is always generated ARM code, never THUMB code
Jump(static_cast<intptr_t>(code.address()), rmode, cond);
}
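The rewritten Jump (and the matching Call below) now chooses among three isolate-independent ways of reaching a builtin before falling back to an ordinary code-target jump: a pc-relative branch whose offset temporarily holds the code-target index (fixed up through the RELATIVE_CODE_TARGET reloc), an indirect load through the builtins constants table, and an inlined off-heap trampoline. A stripped-down sketch of that selection order, with stand-in booleans for the options() queries (not the V8 API):
enum class BuiltinCallPath {
  kPcRelative,         // b/bl with RELATIVE_CODE_TARGET
  kConstantsTable,     // IndirectLoadConstant + indirect jump
  kOffHeapTrampoline,  // mov ip, <off-heap entry>; jump via ip
  kOrdinaryCodeTarget
};
BuiltinCallPath ChoosePath(bool is_isolate_independent_builtin,
                           bool use_pc_relative_calls_and_jumps,
                           bool root_array_available,
                           bool isolate_independent_code,
                           bool inline_offheap_trampolines) {
  if (is_isolate_independent_builtin && use_pc_relative_calls_and_jumps)
    return BuiltinCallPath::kPcRelative;
  if (root_array_available && isolate_independent_code)
    return BuiltinCallPath::kConstantsTable;
  if (is_isolate_independent_builtin && inline_offheap_trampolines)
    return BuiltinCallPath::kOffHeapTrampoline;
  return BuiltinCallPath::kOrdinaryCodeTarget;
}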
@ -312,16 +297,37 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond, TargetAddressStorageMode mode,
bool check_constant_pool) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
LookupConstant(ip, code);
add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
Call(ip, cond);
return;
if (FLAG_embedded_builtins) {
int builtin_index = Builtins::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
bl(code_target_index * Instruction::kInstrSize, cond,
RelocInfo::RELATIVE_CODE_TARGET);
return;
} else if (root_array_available_ && options().isolate_independent_code) {
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
IndirectLoadConstant(ip, code);
add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
Call(ip, cond);
return;
} else if (target_is_isolate_independent_builtin &&
options().inline_offheap_trampolines) {
// Inline the trampoline.
DCHECK(Builtins::IsBuiltinId(builtin_index));
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
// Use ip directly instead of using UseScratchRegisterScope, as we do not
// preserve scratch registers across calls.
mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(ip, cond);
return;
}
}
#endif // V8_EMBEDDED_BUILTINS
// 'code' is always generated ARM code, never THUMB code
Call(code.address(), rmode, cond, mode);
}
@ -362,29 +368,22 @@ void TurboAssembler::Push(Smi* smi) {
void TurboAssembler::Move(Register dst, Smi* smi) { mov(dst, Operand(smi)); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
Heap::RootListIndex root_index;
if (!isolate()->heap()->IsRootHandle(value, &root_index)) {
LookupConstant(dst, value);
} else {
LoadRoot(dst, root_index);
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadConstant(dst, value);
return;
}
return;
}
#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(value));
}
void TurboAssembler::Move(Register dst, ExternalReference reference) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList() &&
reference.address() !=
ExternalReference::roots_array_start(isolate()).address()) {
LookupExternalReference(dst, reference);
return;
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadExternalReference(dst, reference);
return;
}
}
#endif // V8_EMBEDDED_BUILTINS
mov(dst, Operand(reference));
}
@ -564,7 +563,7 @@ void MacroAssembler::Store(Register src,
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond) {
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), cond);
}
@ -1224,10 +1223,6 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
Register scratch = temps.Acquire();
mov(scratch, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(scratch);
if (type == StackFrame::INTERNAL) {
Move(scratch, CodeObject());
push(scratch);
}
}
int TurboAssembler::LeaveFrame(StackFrame::Type type) {
@ -1599,9 +1594,9 @@ void MacroAssembler::InvokeFunction(Register fun, Register new_target,
ldr(temp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
ldr(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
ldrh(expected_reg,
FieldMemOperand(temp_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
InvokeFunctionCode(fun, new_target, expected, actual, flag);
@ -1774,7 +1769,8 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DwVfpRegister double_input) {
DwVfpRegister double_input,
StubCallMode stub_mode) {
Label done;
TryInlineTruncateDoubleToI(result, double_input, &done);
@ -1784,7 +1780,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
vstr(double_input, MemOperand(sp, 0));
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
ldr(result, MemOperand(sp, 0));
add(sp, sp, Operand(kDoubleSize));
@ -1793,8 +1793,8 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
bind(&done);
}
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@ -1802,9 +1802,9 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
mov(r0, Operand(f->nargs));
Move(r1, ExternalReference::Create(f));
Handle<Code> code =
CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
Call(code, RelocInfo::CODE_TARGET);
DCHECK(!AreAliased(centry, r0, r1));
add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
Call(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@ -1905,18 +1905,17 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) {
void TurboAssembler::Abort(AbortReason reason) {
Label abort_start;
bind(&abort_start);
#ifdef DEBUG
const char* msg = GetAbortReason(reason);
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
#ifdef DEBUG
RecordComment("Abort message: ");
RecordComment(msg);
#endif
if (FLAG_trap_on_abort) {
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
stop(msg);
return;
}
#endif
Move(r1, Smi::FromInt(static_cast<int>(reason)));
@ -1953,6 +1952,7 @@ void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(kRootRegister, Operand(roots_array_start));
add(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
}
void MacroAssembler::SmiTag(Register reg, SBit s) {
@ -1979,6 +1979,16 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
b(eq, smi_label);
}
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
cmp(x, Operand(y));
b(eq, dest);
}
void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
cmp(x, Operand(y));
b(lt, dest);
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
tst(value, Operand(kSmiTagMask));
b(ne, not_smi_label);
@ -2010,18 +2020,6 @@ void MacroAssembler::AssertSmi(Register object) {
}
}
void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
tst(object, Operand(kSmiTagMask));
Check(ne, AbortReason::kOperandIsASmiAndNotAFixedArray);
push(object);
CompareObjectType(object, object, object, FIXED_ARRAY_TYPE);
pop(object);
Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}
void MacroAssembler::AssertConstructor(Register object) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -2094,7 +2092,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
CompareRoot(object, Heap::kUndefinedValueRootIndex);
b(eq, &done_checking);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
bind(&done_checking);
}
@ -2453,7 +2451,7 @@ bool AreAliased(Register reg1,
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
// We can use the register pc - 8 for the address of the current instruction.
sub(dst, pc, Operand(pc_offset() + TurboAssembler::kPcLoadDelta));
sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
}
void TurboAssembler::ResetSpeculationPoisonRegister() {

View File

@ -9,6 +9,7 @@
#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/globals.h"
#include "src/turbo-assembler.h"
namespace v8 {
namespace internal {
@ -25,9 +26,13 @@ constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
constexpr Register kInterpreterDispatchTableRegister = r8;
constexpr Register kJavaScriptCallArgCountRegister = r0;
constexpr Register kJavaScriptCallCodeStartRegister = r2;
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = r3;
constexpr Register kJavaScriptCallExtraArg1Register = r2;
constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r1;
constexpr Register kRuntimeCallArgCountRegister = r0;
@ -85,20 +90,13 @@ enum TargetAddressStorageMode {
NEVER_INLINE_TARGET_ADDRESS
};
class TurboAssembler : public Assembler {
class TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object);
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }
Isolate* isolate() const { return isolate_; }
Handle<HeapObject> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: TurboAssemblerBase(isolate, options, buffer, buffer_size,
create_code_object) {}
// Activation support.
void EnterFrame(StackFrame::Type type,
@ -321,11 +319,10 @@ class TurboAssembler : public Assembler {
void AsrPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, uint32_t shift);
#ifdef V8_EMBEDDED_BUILTINS
void LookupConstant(Register destination, Handle<Object> object);
void LookupExternalReference(Register destination,
ExternalReference reference);
#endif // V8_EMBEDDED_BUILTINS
void LoadFromConstantsTable(Register destination,
int constant_index) override;
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
// Returns the size of a call in instructions. Note, the value returned is
// only valid as long as no entries are added to the constant pool between
@ -338,9 +335,10 @@ class TurboAssembler : public Assembler {
int CallStubSize();
void CallStubDelayed(CodeStub* stub);
// TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
// builtin for the target runtime function and uses an indirect call.
void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Call(Register target, Condition cond = al);
@ -355,7 +353,9 @@ class TurboAssembler : public Assembler {
// This should only be used when assembling a deoptimizer call because of
// the CheckConstPool invocation, which is only needed for deoptimization.
void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
void CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode) {
USE(deopt_id);
Call(target, rmode);
CheckConstPool(false, false);
}
@ -511,12 +511,18 @@ class TurboAssembler : public Assembler {
}
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index) override {
LoadRoot(destination, index, al);
}
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond = al);
Condition cond);
// Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
void JumpIfEqual(Register x, int32_t y, Label* dest);
void JumpIfLessThan(Register x, int32_t y, Label* dest);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
// succeeds, otherwise falls through if result is saturated. On return
@ -530,7 +536,7 @@ class TurboAssembler : public Assembler {
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
DwVfpRegister double_input);
DwVfpRegister double_input, StubCallMode stub_mode);
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
@ -549,18 +555,7 @@ class TurboAssembler : public Assembler {
void ResetSpeculationPoisonRegister();
bool root_array_available() const { return root_array_available_; }
void set_root_array_available(bool v) { root_array_available_ = v; }
protected:
// This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_;
private:
bool has_frame_ = false;
bool root_array_available_ = true;
Isolate* const isolate_;
// Compare single values and then load the fpscr flags to a register.
void VFPCompareAndLoadFlags(const SwVfpRegister src1,
const SwVfpRegister src2,
@ -602,7 +597,11 @@ class TurboAssembler : public Assembler {
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object);
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
void Mls(Register dst, Register src1, Register src2, Register srcA,
Condition cond = al);
@ -849,9 +848,6 @@ class MacroAssembler : public TurboAssembler {
void AssertNotSmi(Register object);
void AssertSmi(Register object);
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);

View File

@ -285,7 +285,7 @@ void ArmDebugger::Debug() {
|| (strcmp(cmd, "printobject") == 0)) {
if (argc == 2) {
int32_t value;
OFStream os(stdout);
StdoutStream os;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
os << arg1 << ": \n";
@ -514,7 +514,7 @@ void ArmDebugger::Debug() {
PrintF(" Stops are debug instructions inserted by\n");
PrintF(" the Assembler::stop() function.\n");
PrintF(" When hitting a stop, the Simulator will\n");
PrintF(" stop and and give control to the ArmDebugger.\n");
PrintF(" stop and give control to the ArmDebugger.\n");
PrintF(" The first %d stop codes are watched:\n",
Simulator::kNumOfWatchedStops);
PrintF(" - They can be enabled / disabled: the Simulator\n");

View File

@ -386,18 +386,16 @@ unsigned Operand::shift_amount() const {
Operand Operand::UntagSmi(Register smi) {
STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
kSmiValueSize));
DCHECK(smi.Is64Bits());
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
return Operand(smi, ASR, kSmiShift);
}
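UntagSmi is just an arithmetic shift right by kSmiShift; with the 32-bit-smi layout used on arm64 the payload sits in the upper 32 bits of the tagged word. A self-contained sketch of that encoding (assumes kSmiShift == 32; these are not the actual V8 smi helpers):
#include <cstdint>
constexpr int kSketchSmiShift = 32;
constexpr int64_t SketchTagSmi(int32_t value) {
  // Equivalent to value << 32, written as a multiply to stay well-defined
  // for negative values in older C++ standards.
  return static_cast<int64_t>(value) * (int64_t{1} << kSketchSmiShift);
}
constexpr int32_t SketchUntagSmi(int64_t tagged) {
  return static_cast<int32_t>(tagged >> kSketchSmiShift);  // the ASR above
}
static_assert(SketchUntagSmi(SketchTagSmi(7)) == 7, "round-trips a small smi");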
Operand Operand::UntagSmiAndScale(Register smi, int scale) {
STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
kSmiValueSize));
DCHECK(smi.Is64Bits());
DCHECK((scale >= 0) && (scale <= (64 - kSmiValueSize)));
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
if (scale > kSmiShift) {
return Operand(smi, LSL, scale - kSmiShift);
} else if (scale < kSmiShift) {
@ -551,11 +549,8 @@ Handle<Code> Assembler::code_target_object_handle_at(Address pc) {
Assembler::target_address_at(pc, 0 /* unused */)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
DCHECK_GE(instr->ImmPCOffset(), 0);
DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0);
DCHECK_LT(instr->ImmPCOffset() >> kInstructionSizeLog2,
code_targets_.size());
return code_targets_[instr->ImmPCOffset() >> kInstructionSizeLog2];
return GetCodeTarget(instr->ImmPCOffset() >> kInstructionSizeLog2);
}
}
@ -565,7 +560,7 @@ Address Assembler::runtime_entry_at(Address pc) {
return Assembler::target_address_at(pc, 0 /* unused */);
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
return instr->ImmPCOffset() + isolate_data().code_range_start_;
return instr->ImmPCOffset() + options().code_range_start;
}
}
@ -708,7 +703,7 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
}
}
void RelocInfo::set_target_object(HeapObject* target,
void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
@ -716,9 +711,8 @@ void RelocInfo::set_target_object(HeapObject* target,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
heap->incremental_marking()->RecordWriteIntoCode(host(), this, target);
heap->RecordWriteIntoCode(host(), this, target);
}
}
@ -746,13 +740,6 @@ Address RelocInfo::target_internal_reference_address() {
return pc_;
}
void RelocInfo::set_wasm_code_table_entry(Address target,
ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::WASM_CODE_TABLE_ENTRY);
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return origin->runtime_entry_at(pc_);
@ -788,7 +775,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTarget(mode)) {
} else if (RelocInfo::IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(host(), this);

View File

@ -157,9 +157,10 @@ CPURegList CPURegList::GetSafepointSavedRegisters() {
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::INTERNAL_REFERENCE;
const int RelocInfo::kApplyMask =
RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE);
bool RelocInfo::IsCodedSpecially() {
// The deserializer needs to know whether a pointer is specially coded. Being
@ -179,33 +180,39 @@ bool RelocInfo::IsInConstantPool() {
return instr->IsLdrLiteralX();
}
Address RelocInfo::embedded_address() const {
return Assembler::target_address_at(pc_, constant_pool_);
}
int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
DCHECK(IsRuntimeEntry(rmode_));
Instruction* movz_instr = reinterpret_cast<Instruction*>(pc_)->preceding();
DCHECK(movz_instr->IsMovz());
uint64_t imm = static_cast<uint64_t>(movz_instr->ImmMoveWide())
<< (16 * movz_instr->ShiftMoveWide());
DCHECK_LE(imm, INT_MAX);
uint32_t RelocInfo::embedded_size() const {
return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}
void RelocInfo::set_embedded_address(Address address,
ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(pc_, constant_pool_, address, flush_mode);
}
void RelocInfo::set_embedded_size(uint32_t size, ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
// No icache flushing needed, see comment in set_target_address_at.
return static_cast<int>(imm);
}
void RelocInfo::set_js_to_wasm_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
set_embedded_address(address, icache_flush_mode);
Assembler::set_target_address_at(pc_, constant_pool_, address,
icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
return embedded_address();
return Assembler::target_address_at(pc_, constant_pool_);
}
uint32_t RelocInfo::wasm_call_tag() const {
DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
Instruction* instr = reinterpret_cast<Instruction*>(pc_);
if (instr->IsLdrLiteralX()) {
return static_cast<uint32_t>(
Memory::Address_at(Assembler::target_pointer_address_at(pc_)));
} else {
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
return static_cast<uint32_t>(instr->ImmPCOffset() / kInstructionSize);
}
}
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
@ -304,8 +311,8 @@ void Immediate::InitializeHandle(Handle<HeapObject> handle) {
bool Operand::NeedsRelocation(const Assembler* assembler) const {
RelocInfo::Mode rmode = immediate_.rmode();
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
return assembler->serializer_enabled();
if (RelocInfo::IsOnlyForSerializer(rmode)) {
return assembler->options().record_reloc_info_for_serialization;
}
return !RelocInfo::IsNone(rmode);
@ -342,8 +349,7 @@ bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
if (CanBeShared(mode)) {
write_reloc_info = AddSharedEntry(shared_entries_, raw_data, offset);
} else if (mode == RelocInfo::CODE_TARGET &&
assm_->IsCodeTargetSharingAllowed() && raw_data != 0) {
} else if (mode == RelocInfo::CODE_TARGET && raw_data != 0) {
// A zero data value is a placeholder and must not be shared.
write_reloc_info = AddSharedEntry(handle_to_index_map_, raw_data, offset);
} else {
@ -471,8 +477,7 @@ void ConstPool::Clear() {
bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
return RelocInfo::IsNone(mode) ||
(mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
return RelocInfo::IsNone(mode) || RelocInfo::IsShareableRelocMode(mode);
}
@ -537,7 +542,7 @@ void ConstPool::EmitEntries() {
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
instr->SetImmPCOffsetTarget(assm_->isolate_data(), assm_->pc());
instr->SetImmPCOffsetTarget(assm_->options(), assm_->pc());
}
assm_->dc64(entry.first);
@ -547,13 +552,13 @@ void ConstPool::EmitEntries() {
// Assembler
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
: AssemblerBase(isolate_data, buffer, buffer_size),
Assembler::Assembler(const AssemblerOptions& options, void* buffer,
int buffer_size)
: AssemblerBase(options, buffer, buffer_size),
constpool_(this),
unresolved_branches_() {
const_pool_blocked_nesting_ = 0;
veneer_pool_blocked_nesting_ = 0;
code_target_sharing_blocked_nesting_ = 0;
Reset();
}
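A hedged sketch of how the options-based constructor above is used; AssemblerOptions::Default(isolate) is assumed to be the standard way to obtain options for ordinary on-heap code at this revision, and the emitted instructions are elided.

#include "src/arm64/assembler-arm64.h"  // V8-internal include, sketch only

namespace v8 {
namespace internal {

// Emits into a caller-provided buffer via the options-based constructor.
void ExampleAssemble(Isolate* isolate, byte* buffer, int buffer_size) {
  Assembler masm(AssemblerOptions::Default(isolate), buffer, buffer_size);
  // ... emit instructions through masm ...
  // CodeDesc desc;
  // masm.GetCode(isolate, &desc);  // unchanged by this patch
}

}  // namespace internal
}  // namespace v8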
@ -562,7 +567,6 @@ Assembler::~Assembler() {
DCHECK(constpool_.IsEmpty());
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
}
@ -571,12 +575,11 @@ void Assembler::Reset() {
DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
DCHECK_EQ(const_pool_blocked_nesting_, 0);
DCHECK_EQ(veneer_pool_blocked_nesting_, 0);
DCHECK_EQ(code_target_sharing_blocked_nesting_, 0);
DCHECK(unresolved_branches_.empty());
memset(buffer_, 0, pc_ - buffer_);
#endif
pc_ = buffer_;
code_targets_.reserve(64);
ReserveCodeTargetSpace(64);
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
constpool_.Clear();
next_constant_pool_check_ = 0;
@ -589,8 +592,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
switch (request.kind()) {
case HeapObjectRequest::kHeapNumber: {
Handle<HeapObject> object = isolate->factory()->NewHeapNumber(
request.heap_number(), IMMUTABLE, TENURED);
Handle<HeapObject> object =
isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
set_target_address_at(pc, 0 /* unused */, object.address());
break;
}
@ -598,12 +601,9 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->set_isolate(isolate);
Instruction* instr = reinterpret_cast<Instruction*>(pc);
DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch());
DCHECK_GE(instr->ImmPCOffset(), 0);
DCHECK_EQ(instr->ImmPCOffset() % kInstructionSize, 0);
DCHECK_LT(instr->ImmPCOffset() >> kInstructionSizeLog2,
code_targets_.size());
code_targets_[instr->ImmPCOffset() >> kInstructionSizeLog2] =
request.code_stub()->GetCode();
UpdateCodeTarget(instr->ImmPCOffset() >> kInstructionSizeLog2,
request.code_stub()->GetCode());
break;
}
}
@ -697,22 +697,22 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
} else if (branch == next_link) {
// The branch is the last (but not also the first) instruction in the chain.
prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);
prev_link->SetImmPCOffsetTarget(options(), prev_link);
} else {
// The branch is in the middle of the chain.
if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
prev_link->SetImmPCOffsetTarget(isolate_data(), next_link);
prev_link->SetImmPCOffsetTarget(options(), next_link);
} else if (label_veneer != nullptr) {
// Use the veneer for all previous links in the chain.
prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);
prev_link->SetImmPCOffsetTarget(options(), prev_link);
end_of_chain = false;
link = next_link;
while (!end_of_chain) {
next_link = link->ImmPCOffsetTarget();
end_of_chain = (link == next_link);
link->SetImmPCOffsetTarget(isolate_data(), label_veneer);
link->SetImmPCOffsetTarget(options(), label_veneer);
link = next_link;
}
} else {
@ -783,11 +783,10 @@ void Assembler::bind(Label* label) {
// Internal references do not get patched to an instruction but directly
// to an address.
internal_reference_positions_.push_back(linkoffset);
PatchingAssembler patcher(isolate_data(), reinterpret_cast<byte*>(link),
2);
PatchingAssembler patcher(options(), reinterpret_cast<byte*>(link), 2);
patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
} else {
link->SetImmPCOffsetTarget(isolate_data(),
link->SetImmPCOffsetTarget(options(),
reinterpret_cast<Instruction*>(pc_));
}
@ -4082,9 +4081,7 @@ void Assembler::EmitStringData(const char* string) {
void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR
// Don't generate simulator specific code if we are building a snapshot, which
// might be run on real hardware.
if (!serializer_enabled()) {
if (options().enable_simulator_code) {
// The arguments to the debug marker need to be contiguous in memory, so
// make sure we don't try to emit pools.
BlockPoolsScope scope(this);
@ -4757,6 +4754,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
ConstantPoolMode constant_pool_mode) {
// Non-relocatable constants should not end up in the literal pool.
DCHECK(!RelocInfo::IsNone(rmode));
if (options().disable_reloc_info_for_patching) return;
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
@ -4783,10 +4781,10 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
// For modes that cannot use the constant pool, a different sequence of
// instructions will be emitted by this function's caller.
if (!RelocInfo::IsNone(rmode) && write_reloc_info) {
if (write_reloc_info) {
// Don't record external references unless the heap will be serialized.
if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
!serializer_enabled() && !emit_debug_code()) {
if (RelocInfo::IsOnlyForSerializer(rmode) &&
!options().record_reloc_info_for_serialization && !emit_debug_code()) {
return;
}
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
@ -4794,18 +4792,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
}
}
int Assembler::GetCodeTargetIndex(Handle<Code> target) {
int current = static_cast<int>(code_targets_.size());
if (current > 0 && !target.is_null() &&
code_targets_.back().address() == target.address()) {
// Optimization if we keep jumping to the same code target.
return (current - 1);
} else {
code_targets_.push_back(target);
return current;
}
}
void Assembler::near_jump(int offset, RelocInfo::Mode rmode) {
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode, offset, NO_POOL_ENTRY);
b(offset);
@ -4818,7 +4804,7 @@ void Assembler::near_call(int offset, RelocInfo::Mode rmode) {
void Assembler::near_call(HeapObjectRequest request) {
RequestHeapObject(request);
int index = GetCodeTargetIndex(Handle<Code>());
int index = AddCodeTarget(Handle<Code>());
RecordRelocInfo(RelocInfo::CODE_TARGET, index, NO_POOL_ENTRY);
bl(index);
}
@ -4945,7 +4931,7 @@ void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
// to the label.
Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
RemoveBranchFromLabelLinkChain(branch, label, veneer);
branch->SetImmPCOffsetTarget(isolate_data(), veneer);
branch->SetImmPCOffsetTarget(options(), veneer);
b(label);
#ifdef DEBUG
DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=

View File

@ -898,9 +898,7 @@ class Assembler : public AssemblerBase {
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~Assembler();
@ -972,10 +970,6 @@ class Assembler : public AssemblerBase {
Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Add 'target' to the code_targets_ vector, if necessary, and return the
// offset at which it is stored.
int GetCodeTargetIndex(Handle<Code> target);
// Returns the handle for the code object called at 'pc'.
// This might need to be temporarily encoded as an offset into code_targets_.
inline Handle<Code> code_target_object_handle_at(Address pc);
@ -984,7 +978,7 @@ class Assembler : public AssemblerBase {
// at 'pc'.
// Runtime entries can be temporarily encoded as the offset between the
// runtime function entrypoint and the code range start (stored in the
// code_range_start_ field), in order to be encodable as we generate the code,
// code_range_start field), in order to be encodable as we generate the code,
// before it is moved into the code space.
inline Address runtime_entry_at(Address pc);
@ -2887,6 +2881,10 @@ class Assembler : public AssemblerBase {
return reinterpret_cast<byte*>(instr) - buffer_;
}
static const char* GetSpecialRegisterName(int code) {
return (code == kSPRegInternalCode) ? "sp" : "UNKNOWN";
}
// Register encoding.
static Instr Rd(CPURegister rd) {
DCHECK_NE(rd.code(), kSPRegInternalCode);
@ -3229,34 +3227,6 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
};
// Class for blocking sharing of code targets in constant pool.
class BlockCodeTargetSharingScope {
public:
explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) {
Open(assem);
}
// This constructor does not initialize the scope. The user needs to
// explicitly call Open() before using it.
BlockCodeTargetSharingScope() : assem_(nullptr) {}
~BlockCodeTargetSharingScope() { Close(); }
void Open(Assembler* assem) {
DCHECK_NULL(assem_);
DCHECK_NOT_NULL(assem);
assem_ = assem;
assem_->StartBlockCodeTargetSharing();
}
private:
void Close() {
if (assem_ != nullptr) {
assem_->EndBlockCodeTargetSharing();
}
}
Assembler* assem_;
DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope);
};
protected:
inline const Register& AppropriateZeroRegFor(const CPURegister& reg) const;
@ -3341,16 +3311,6 @@ class Assembler : public AssemblerBase {
void RemoveBranchFromLabelLinkChain(Instruction* branch, Label* label,
Instruction* label_veneer = nullptr);
// Prevent sharing of code target constant pool entries until
// EndBlockCodeTargetSharing is called. Calls to this function can be nested
// but must be followed by an equal number of calls to
// EndBlockCodeTargetSharing.
void StartBlockCodeTargetSharing() { ++code_target_sharing_blocked_nesting_; }
// Resume sharing of constant pool code target entries. Needs to be called
// as many times as StartBlockCodeTargetSharing to have an effect.
void EndBlockCodeTargetSharing() { --code_target_sharing_blocked_nesting_; }
private:
static uint32_t FPToImm8(double imm);
@ -3530,12 +3490,6 @@ class Assembler : public AssemblerBase {
// Emission of the veneer pools may be blocked in some code sequences.
int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
// Sharing of code target entries may be blocked in some code sequences.
int code_target_sharing_blocked_nesting_;
bool IsCodeTargetSharingAllowed() const {
return code_target_sharing_blocked_nesting_ == 0;
}
// Relocation info generation
// Each relocation is encoded as a variable size value
static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@ -3546,14 +3500,6 @@ class Assembler : public AssemblerBase {
// are already bound.
std::deque<int> internal_reference_positions_;
// Before we copy code into the code space, we cannot encode calls to code
// targets as we normally would, as the difference between the instruction's
// location in the temporary buffer and the call target is not guaranteed to
// fit in the offset field. We keep track of the code handles we encounter
// in calls in this vector, and encode the index of the code handle in the
// vector instead.
std::vector<Handle<Code>> code_targets_;
// Relocation info records are also used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These pending relocation info records are temporarily
@ -3649,20 +3595,8 @@ class Assembler : public AssemblerBase {
// the length of the label chain.
void DeleteUnresolvedBranchInfoForLabelTraverse(Label* label);
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
// code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
private:
friend class EnsureSpace;
friend class ConstPool;
};
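A toy model of the request-and-patch bookkeeping described in the comment above (dropped here, presumably because it now lives in the shared assembler base): during assembly only a placeholder is emitted and the need is recorded; after assembly each request patches the real value in at its recorded offset. All names below are invented for the sketch.

#include <cassert>
#include <cstdint>
#include <vector>

struct HeapObjectRequestSketch {
  int offset;      // pc offset of the dummy value emitted during assembly
  uint64_t value;  // what really belongs there, known only after assembly
};

int main() {
  std::vector<uint64_t> buffer(4, 0);  // stand-in for the code buffer
  std::vector<HeapObjectRequestSketch> requests;

  buffer[2] = 0xDEADBEEF;           // dummy handle emitted at offset 2
  requests.push_back({2, 0x1234});  // RequestHeapObject(...)

  // AllocateAndInstallRequestedHeapObjects(...): patch every recorded slot.
  for (const auto& request : requests) buffer[request.offset] = request.value;

  assert(buffer[2] == 0x1234);
  return 0;
}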
@ -3678,8 +3612,9 @@ class PatchingAssembler : public Assembler {
// relocation information takes space in the buffer, the PatchingAssembler
// will crash trying to grow the buffer.
// Note that the instruction cache will not be flushed.
PatchingAssembler(IsolateData isolate_data, byte* start, unsigned count)
: Assembler(isolate_data, start, count * kInstructionSize + kGap) {
PatchingAssembler(const AssemblerOptions& options, byte* start,
unsigned count)
: Assembler(options, start, count * kInstructionSize + kGap) {
// Block constant pool emission.
StartBlockPools();
}

View File

@ -29,21 +29,6 @@ namespace internal {
#define __ ACCESS_MASM(masm)
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ Mov(x5, Operand(x0, LSL, kPointerSizeLog2));
__ Poke(x1, Operand(x5));
__ Push(x1, x2);
__ Add(x0, x0, Operand(3));
__ TailCallRuntime(Runtime::kNewArray);
}
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the following stubs are generated in this order
// because pregenerated stubs can only call other pregenerated stubs.
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
// This is the entry point from C++. 5 arguments are provided in x0-x4.
// See use of the JSEntryFunction for example in src/execution.cc.
// Input:
@ -324,322 +309,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
__ Call(GetCode(), RelocInfo::CODE_TARGET);
}
template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
ASM_LOCATION("CreateArrayDispatch");
if (mode == DISABLE_ALLOCATION_SITES) {
T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
Register kind = x3;
int last_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
// TODO(jbramley): Is this the best way to handle this? Can we make the
// tail calls conditional, rather than hopping over each one?
__ CompareAndBranch(kind, candidate_kind, ne, &next);
T stub(masm->isolate(), candidate_kind);
__ TailCallStub(&stub);
__ Bind(&next);
}
// If we reached this point there is a problem.
__ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
}
// TODO(jbramley): If this needs to be a special case, make it a proper template
// specialization, and not a separate function.
static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
ASM_LOCATION("CreateArrayDispatchOneArgument");
// x0 - argc
// x1 - constructor?
// x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
// x3 - kind (if mode != DISABLE_ALLOCATION_SITES)
// sp[0] - last argument
Register allocation_site = x2;
Register kind = x3;
STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(PACKED_ELEMENTS == 2);
STATIC_ASSERT(HOLEY_ELEMENTS == 3);
STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
} else if (mode == DONT_OVERRIDE) {
// Is the low bit set? If so, the array is holey.
Label normal_sequence;
__ Tbnz(kind, 0, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ Orr(kind, kind, 1);
if (FLAG_debug_code) {
__ Ldr(x10, FieldMemOperand(allocation_site, 0));
__ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex,
&normal_sequence);
__ Assert(eq, AbortReason::kExpectedAllocationSite);
}
// Save the resulting elements kind in type info. We can't just store 'kind'
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field; upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ Ldr(x11,
FieldMemOperand(allocation_site,
AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
__ Str(x11,
FieldMemOperand(allocation_site,
AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ Bind(&normal_sequence);
int last_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next;
ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i);
__ CompareAndBranch(kind, candidate_kind, ne, &next);
ArraySingleArgumentConstructorStub stub(masm->isolate(), candidate_kind);
__ TailCallStub(&stub);
__ Bind(&next);
}
// If we reached this point there is a problem.
__ Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
} else {
UNREACHABLE();
}
}
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
int to_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
}
}
void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
isolate);
ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
stubh1.GetCode();
InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
stubh2.GetCode();
}
}
void ArrayConstructorStub::GenerateDispatchToArrayStub(
MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
Register argc = x0;
Label zero_case, n_case;
__ Cbz(argc, &zero_case);
__ Cmp(argc, 1);
__ B(ne, &n_case);
// One argument.
CreateArrayDispatchOneArgument(masm, mode);
__ Bind(&zero_case);
// No arguments.
CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
__ Bind(&n_case);
// N arguments.
ArrayNArgumentsConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void ArrayConstructorStub::Generate(MacroAssembler* masm) {
ASM_LOCATION("ArrayConstructorStub::Generate");
// ----------- S t a t e -------------
// -- x0 : argc (only if argument_count() is ANY or MORE_THAN_ONE)
// -- x1 : constructor
// -- x2 : AllocationSite or undefined
// -- x3 : new target
// -- sp[0] : last argument
// -----------------------------------
Register constructor = x1;
Register allocation_site = x2;
Register new_target = x3;
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
Label unexpected_map, map_ok;
// Initial map for the builtin Array function should be a map.
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
__ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
__ Bind(&map_ok);
// We should either have undefined in the allocation_site register or a
// valid AllocationSite.
__ AssertUndefinedOrAllocationSite(allocation_site);
}
// Enter the context of the Array function.
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
Label subclassing;
__ Cmp(new_target, constructor);
__ B(ne, &subclassing);
Register kind = x3;
Label no_info;
// Get the elements kind and case on that.
__ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
__ Ldrsw(kind, UntagSmiFieldMemOperand(
allocation_site,
AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ Bind(&no_info);
GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
// Subclassing support.
__ Bind(&subclassing);
__ Poke(constructor, Operand(x0, LSL, kPointerSizeLog2));
__ Add(x0, x0, Operand(3));
__ Push(new_target, allocation_site);
__ JumpToExternalReference(ExternalReference::Create(Runtime::kNewArray));
}
void InternalArrayConstructorStub::GenerateCase(
MacroAssembler* masm, ElementsKind kind) {
Label zero_case, n_case;
Register argc = x0;
__ Cbz(argc, &zero_case);
__ CompareAndBranch(argc, 1, ne, &n_case);
// One argument.
if (IsFastPackedElementsKind(kind)) {
Label packed_case;
// We might need to create a holey array; look at the first argument.
__ Peek(x10, 0);
__ Cbz(x10, &packed_case);
InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind));
__ TailCallStub(&stub1_holey);
__ Bind(&packed_case);
}
InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
__ TailCallStub(&stub1);
__ Bind(&zero_case);
// No arguments.
InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
__ TailCallStub(&stub0);
__ Bind(&n_case);
// N arguments.
ArrayNArgumentsConstructorStub stubN(isolate());
__ TailCallStub(&stubN);
}
void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
// -- x1 : constructor
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
Register constructor = x1;
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
Label unexpected_map, map_ok;
// Initial map for the builtin Array function should be a map.
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
__ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
__ Bind(&map_ok);
}
Register kind = w3;
// Figure out the right elements kind
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
// Retrieve elements_kind from map.
__ LoadElementsKindFromMap(kind, x10);
if (FLAG_debug_code) {
Label done;
__ Cmp(x3, PACKED_ELEMENTS);
__ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
__ Assert(
eq,
AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
}
Label fast_elements_case;
__ CompareAndBranch(kind, PACKED_ELEMENTS, eq, &fast_elements_case);
GenerateCase(masm, HOLEY_ELEMENTS);
__ Bind(&fast_elements_case);
GenerateCase(masm, PACKED_ELEMENTS);
}
// The number of registers that CallApiFunctionAndReturn will need to save on
// the stack. The space for these registers needs to be allocated in the
// ExitFrame before calling CallApiFunctionAndReturn.
@ -894,8 +563,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
__ Ldr(data, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
__ Mov(isolate_address,
Operand(ExternalReference::isolate_address(isolate())));
__ Mov(isolate_address, ExternalReference::isolate_address(isolate()));
__ Ldr(name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
// PropertyCallbackArguments:

View File

@ -26,6 +26,7 @@ STATIC_ASSERT(sizeof(1L) == sizeof(int64_t));
namespace v8 {
namespace internal {
constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
const unsigned kInstructionSize = 4;
const unsigned kInstructionSizeLog2 = 2;
@ -140,6 +141,11 @@ const unsigned kFloat16MantissaBits = 10;
const unsigned kFloat16ExponentBits = 5;
const unsigned kFloat16ExponentBias = 15;
// Actual value of root register is offset from the root array's start
// to take advantage of negative displacement values.
// TODO(sigurds): Choose best value.
constexpr int kRootRegisterBias = 256;
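A small self-contained sketch of the arithmetic implied by kRootRegisterBias: the register points kRootRegisterBias bytes past the start of the roots array, so root loads such as MemOperand(kRootRegister, RootRegisterOffset(index)) see partly negative displacements. The helper below only mirrors the real RootRegisterOffset.

constexpr int kPointerSizeLog2 = 3;           // arm64
constexpr int kRootRegisterBiasSketch = 256;  // same value as above

constexpr int RootRegisterOffset(int root_index) {
  return (root_index << kPointerSizeLog2) - kRootRegisterBiasSketch;
}

static_assert(RootRegisterOffset(0) == -256, "first root sits below the biased register");
static_assert(RootRegisterOffset(40) == 64, "later roots sit above it");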
typedef uint16_t float16;
#define INSTRUCTION_FIELDS_LIST(V_) \

View File

@ -155,7 +155,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Tst(x1, kSmiTagMask);
__ CzeroX(x0, eq);
__ Mov(x1, type());
__ Mov(x1, static_cast<int>(deopt_kind()));
// Following arguments are already loaded:
// - x2: bailout id
// - x3: code object address
@ -275,50 +275,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Br(continuation);
}
// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry.
const int Deoptimizer::table_entry_size_ = kInstructionSize;
// Size of an entry of the second level deopt table. Since we do not generate
// a table for ARM64, the size is zero.
const int Deoptimizer::table_entry_size_ = 0 * kInstructionSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
UseScratchRegisterScope temps(masm());
// The address at which the deopt table is entered should be in x16, the first
// temp register allocated. We can't assert that the address is in there, but
// we can check that it's the first allocated temp. Later, we'll also check
// the computed entry_id is in the expected range.
Register entry_addr = temps.AcquireX();
// The MacroAssembler will have put the deoptimization id in x16, the first
// temp register allocated. We can't assert that the id is in there, but we
// can check that x16 is the first allocated temp and that the value it contains
// is in the expected range.
Register entry_id = temps.AcquireX();
DCHECK(entry_addr.Is(x16));
DCHECK(entry_id.Is(x17));
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
{
InstructionAccurateScope scope(masm());
Label start_of_table, end_of_table;
__ bind(&start_of_table);
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ b(&end_of_table);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&end_of_table);
// Get the address of the start of the table.
DCHECK(is_int21(table_entry_size_ * count()));
__ adr(entry_id, &start_of_table);
// Compute the gap in bytes between the entry address, which should have
// been left in entry_addr (x16) by CallForDeoptimization, and the start of
// the table.
__ sub(entry_id, entry_addr, entry_id);
// Shift down to obtain the entry_id.
DCHECK_EQ(table_entry_size_, kInstructionSize);
__ lsr(entry_id, entry_id, kInstructionSizeLog2);
}
DCHECK(entry_id.Is(x16));
__ Push(padreg, entry_id);
if (__ emit_debug_code()) {

View File

@ -5,6 +5,10 @@
#ifndef V8_ARM64_FRAME_CONSTANTS_ARM64_H_
#define V8_ARM64_FRAME_CONSTANTS_ARM64_H_
#include "src/base/macros.h"
#include "src/frame-constants.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
@ -46,6 +50,20 @@ class ExitFrameConstants : public TypedFrameConstants {
static constexpr int kConstantPoolOffset = 0; // Not used
};
class WasmCompileLazyFrameConstants : public TypedFrameConstants {
public:
static constexpr int kNumberOfSavedGpParamRegs = 8;
static constexpr int kNumberOfSavedFpParamRegs = 8;
// FP-relative.
static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(1);
static constexpr int kFixedFrameSizeFromFp =
// Header is padded to 16 bytes (see {MacroAssembler::EnterFrame}).
RoundUp<16>(TypedFrameConstants::kFixedFrameSizeFromFp) +
kNumberOfSavedGpParamRegs * kPointerSize +
kNumberOfSavedFpParamRegs * kDoubleSize;
};
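A minimal sketch of the frame-size formula above; typed_frame_header stands in for TypedFrameConstants::kFixedFrameSizeFromFp, whose real value comes from src/frame-constants.h and is only assumed here.

constexpr int kPointerSize = 8;  // arm64
constexpr int kDoubleSize = 8;

template <int alignment>
constexpr int RoundUp(int value) {
  return (value + alignment - 1) & ~(alignment - 1);
}

constexpr int WasmCompileLazyFrameSize(int typed_frame_header) {
  return RoundUp<16>(typed_frame_header) + 8 * kPointerSize + 8 * kDoubleSize;
}

static_assert(WasmCompileLazyFrameSize(24) == 32 + 64 + 64,
              "header padded to 16 bytes, then 8 GP and 8 FP parameter registers");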
class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.

View File

@ -227,21 +227,21 @@ bool Instruction::IsTargetInImmPCOffsetRange(Instruction* target) {
return IsValidImmPCOffset(BranchType(), DistanceTo(target));
}
void Instruction::SetImmPCOffsetTarget(Assembler::IsolateData isolate_data,
void Instruction::SetImmPCOffsetTarget(const AssemblerOptions& options,
Instruction* target) {
if (IsPCRelAddressing()) {
SetPCRelImmTarget(isolate_data, target);
SetPCRelImmTarget(options, target);
} else if (BranchType() != UnknownBranchType) {
SetBranchImmTarget(target);
} else if (IsUnresolvedInternalReference()) {
SetUnresolvedInternalReferenceImmTarget(isolate_data, target);
SetUnresolvedInternalReferenceImmTarget(options, target);
} else {
// Load literal (offset from PC).
SetImmLLiteral(target);
}
}
void Instruction::SetPCRelImmTarget(Assembler::IsolateData isolate_data,
void Instruction::SetPCRelImmTarget(const AssemblerOptions& options,
Instruction* target) {
// ADRP is not supported, so 'this' must point to an ADR instruction.
DCHECK(IsAdr());
@ -252,7 +252,7 @@ void Instruction::SetPCRelImmTarget(Assembler::IsolateData isolate_data,
imm = Assembler::ImmPCRelAddress(static_cast<int>(target_offset));
SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
} else {
PatchingAssembler patcher(isolate_data, reinterpret_cast<byte*>(this),
PatchingAssembler patcher(options, reinterpret_cast<byte*>(this),
PatchingAssembler::kAdrFarPatchableNInstrs);
patcher.PatchAdrFar(target_offset);
}
@ -293,7 +293,7 @@ void Instruction::SetBranchImmTarget(Instruction* target) {
}
void Instruction::SetUnresolvedInternalReferenceImmTarget(
Assembler::IsolateData isolate_data, Instruction* target) {
const AssemblerOptions& options, Instruction* target) {
DCHECK(IsUnresolvedInternalReference());
DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
DCHECK(is_int32(DistanceTo(target) >> kInstructionSizeLog2));
@ -302,7 +302,7 @@ void Instruction::SetUnresolvedInternalReferenceImmTarget(
uint32_t high16 = unsigned_bitextract_32(31, 16, target_offset);
uint32_t low16 = unsigned_bitextract_32(15, 0, target_offset);
PatchingAssembler patcher(isolate_data, reinterpret_cast<byte*>(this), 2);
PatchingAssembler patcher(options, reinterpret_cast<byte*>(this), 2);
patcher.brk(high16);
patcher.brk(low16);
}

View File

@ -402,9 +402,9 @@ class Instruction {
bool IsTargetInImmPCOffsetRange(Instruction* target);
// Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
// a PC-relative addressing instruction.
void SetImmPCOffsetTarget(AssemblerBase::IsolateData isolate_data,
void SetImmPCOffsetTarget(const AssemblerOptions& options,
Instruction* target);
void SetUnresolvedInternalReferenceImmTarget(AssemblerBase::IsolateData,
void SetUnresolvedInternalReferenceImmTarget(const AssemblerOptions& options,
Instruction* target);
// Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source);
@ -441,8 +441,7 @@ class Instruction {
static const int ImmPCRelRangeBitwidth = 21;
static bool IsValidPCRelOffset(ptrdiff_t offset) { return is_int21(offset); }
void SetPCRelImmTarget(AssemblerBase::IsolateData isolate_data,
Instruction* target);
void SetPCRelImmTarget(const AssemblerOptions& options, Instruction* target);
void SetBranchImmTarget(Instruction* target);
};

View File

@ -69,6 +69,7 @@ static const CounterDescriptor kCounterList[] = {
{"Logical DP", Gauge},
{"Other Int DP", Gauge},
{"FP DP", Gauge},
{"NEON", Gauge},
{"Conditional Select", Gauge},
{"Conditional Compare", Gauge},

View File

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/arm64/interface-descriptors-arm64.h"
#if V8_TARGET_ARCH_ARM64
#include "src/interface-descriptors.h"
@ -59,12 +57,6 @@ const Register StoreTransitionDescriptor::MapRegister() { return x5; }
const Register ApiGetterDescriptor::HolderRegister() { return x0; }
const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
const Register MathPowTaggedDescriptor::exponent() { return x11; }
const Register MathPowIntegerDescriptor::exponent() { return x12; }
const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; }
const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
@ -179,26 +171,7 @@ void ConstructStubDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: new target
// x1: target
// x0: number of arguments
Register registers[] = {x1, x3, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void TransitionElementsKindDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0: value (js_array)
// x1: to_map
Register registers[] = {x0, x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void AbortJSDescriptor::InitializePlatformSpecific(
void AbortDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
@ -206,41 +179,7 @@ void AbortJSDescriptor::InitializePlatformSpecific(
void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {x1, x3, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// x1: function
// x2: allocation site with elements kind
// x0: number of arguments to the constructor function
Register registers[] = {x1, x2, x0};
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
// x0: number of arguments
// x1: function
// x2: allocation site with elements kind
Register registers[] = {x1, x2, x0};
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {x1, x2, x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
data->InitializePlatformSpecific(0, nullptr);
}
void CompareDescriptor::InitializePlatformSpecific(
@ -251,7 +190,6 @@ void CompareDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void BinaryOpDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: left operand
@ -262,32 +200,24 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
x1, // JSFunction
x3, // the new target
x0, // actual number of arguments
x2, // expected number of arguments
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ApiCallbackDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
JavaScriptFrame::context_register(), // callee context
x4, // call_data
x2, // holder
x1, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterDispatchDescriptor::InitializePlatformSpecific(
@ -320,7 +250,9 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void InterpreterCEntryDescriptor::InitializePlatformSpecific(
namespace {
void InterpreterCEntryDescriptor_InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (argc)
@ -330,6 +262,18 @@ void InterpreterCEntryDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
} // namespace
void InterpreterCEntry1Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
}
void InterpreterCEntry2Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
InterpreterCEntryDescriptor_InitializePlatformSpecific(data);
}
void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {

View File

@ -1,26 +0,0 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
#define V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
class PlatformInterfaceDescriptor {
public:
explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) {}
TargetAddressStorageMode storage_mode() { return storage_mode_; }
private:
TargetAddressStorageMode storage_mode_;
};
} // namespace internal
} // namespace v8
#endif // V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_

View File

@ -24,17 +24,6 @@ MemOperand FieldMemOperand(Register object, int offset) {
}
MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
return UntagSmiMemOperand(object, offset - kHeapObjectTag);
}
MemOperand UntagSmiMemOperand(Register object, int offset) {
// Assumes that Smis are shifted by 32 bits and little endianness.
STATIC_ASSERT(kSmiShift == 32);
return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
}
void TurboAssembler::And(const Register& rd, const Register& rn,
const Operand& operand) {
DCHECK(allow_macro_instructions());
@ -297,6 +286,7 @@ void TurboAssembler::Asr(const Register& rd, const Register& rn,
}
void TurboAssembler::B(Label* label) {
DCHECK(allow_macro_instructions());
b(label);
CheckVeneerPool(false, false);
}
@ -1040,47 +1030,51 @@ void TurboAssembler::InitializeRootRegister() {
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
Mov(kRootRegister, Operand(roots_array_start));
Add(kRootRegister, kRootRegister, kRootRegisterBias);
}
void MacroAssembler::SmiTag(Register dst, Register src) {
STATIC_ASSERT(kXRegSizeInBits ==
static_cast<unsigned>(kSmiShift + kSmiValueSize));
DCHECK(dst.Is64Bits() && src.Is64Bits());
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
Lsl(dst, src, kSmiShift);
}
void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
void TurboAssembler::SmiUntag(Register dst, Register src) {
STATIC_ASSERT(kXRegSizeInBits ==
static_cast<unsigned>(kSmiShift + kSmiValueSize));
DCHECK(dst.Is64Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts) {
AssertSmi(src);
}
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
Asr(dst, src, kSmiShift);
}
void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
DCHECK(dst.Is64Bits());
if (SmiValuesAre32Bits()) {
if (src.IsImmediateOffset() && src.shift_amount() == 0) {
// Load value directly from the upper half-word.
// Assumes that Smis are shifted by 32 bits and little endianness.
DCHECK_EQ(kSmiShift, 32);
Ldrsw(dst,
MemOperand(src.base(), src.offset() + (kSmiShift / kBitsPerByte),
src.addrmode()));
} else {
Ldr(dst, src);
SmiUntag(dst);
}
} else {
DCHECK(SmiValuesAre31Bits());
Ldr(dst, src);
SmiUntag(dst);
}
}
void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
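A self-contained sketch of the fast path in SmiUntag(Register, MemOperand) above: with 32-bit Smi values stored shifted left by 32, the untagged value is simply the upper half-word of the 64-bit field on a little-endian machine, which is what the Ldrsw at offset + kSmiShift / kBitsPerByte relies on.

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  constexpr int kSmiShift = 32;
  const int64_t tagged = int64_t{-42} * (int64_t{1} << kSmiShift);  // Smi for -42
  int32_t upper;  // what the Ldrsw of the upper half-word reads
  std::memcpy(&upper, reinterpret_cast<const char*>(&tagged) + kSmiShift / 8,
              sizeof(upper));
  assert(upper == -42);  // already untagged, no shift needed
  return 0;
}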
void MacroAssembler::SmiUntagToDouble(VRegister dst, Register src) {
DCHECK(dst.Is64Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts) {
AssertSmi(src);
}
Scvtf(dst, src, kSmiShift);
}
void MacroAssembler::SmiUntagToFloat(VRegister dst, Register src) {
DCHECK(dst.Is32Bits() && src.Is64Bits());
if (FLAG_enable_slow_asserts) {
AssertSmi(src);
}
Scvtf(dst, src, kSmiShift);
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label) {
STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
@ -1096,6 +1090,15 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
}
}
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
Cmp(x, y);
B(eq, dest);
}
void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
Cmp(x, y);
B(lt, dest);
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
JumpIfSmi(value, nullptr, not_smi_label);

View File

@ -8,7 +8,6 @@
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/builtins/constants-table-builder.h"
#include "src/callable.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
@ -20,7 +19,7 @@
#include "src/instruction-stream.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64.h" // Cannot be the first include
@ -28,10 +27,10 @@
namespace v8 {
namespace internal {
MacroAssembler::MacroAssembler(Isolate* isolate, byte* buffer,
unsigned buffer_size,
CodeObjectRequired create_code_object)
: TurboAssembler(isolate, buffer, buffer_size, create_code_object) {
MacroAssembler::MacroAssembler(Isolate* isolate,
const AssemblerOptions& options, void* buffer,
int size, CodeObjectRequired create_code_object)
: TurboAssembler(isolate, options, buffer, size, create_code_object) {
if (create_code_object == CodeObjectRequired::kYes) {
// Unlike TurboAssembler, which can be used off the main thread and may not
// allocate, macro assembler creates its own copy of the self-reference
@ -49,22 +48,6 @@ CPURegList TurboAssembler::DefaultFPTmpList() {
return CPURegList(fp_scratch1, fp_scratch2);
}
TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: Assembler(isolate, buffer, buffer_size),
isolate_(isolate),
#if DEBUG
allow_macro_instructions_(true),
#endif
tmp_list_(DefaultTmpList()),
fptmp_list_(DefaultFPTmpList()),
use_real_aborts_(true) {
if (create_code_object == CodeObjectRequired::kYes) {
code_object_ = Handle<HeapObject>::New(
isolate->heap()->self_reference_marker(), isolate);
}
}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) const {
int bytes = 0;
@ -365,12 +348,12 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand,
}
void TurboAssembler::Mov(const Register& rd, ExternalReference reference) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
LookupExternalReference(rd, reference);
return;
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadExternalReference(rd, reference);
return;
}
}
#endif // V8_EMBEDDED_BUILTINS
Mov(rd, Operand(reference));
}
@ -1445,7 +1428,8 @@ void MacroAssembler::PopCalleeSavedRegisters() {
}
void TurboAssembler::AssertSpAligned() {
if (emit_debug_code() && use_real_aborts()) {
if (emit_debug_code()) {
TrapOnAbortScope trap_on_abort_scope(this); // Avoid calls to Abort.
// Arm64 requires the stack pointer to be 16-byte aligned prior to address
// calculation.
UseScratchRegisterScope scope(this);
@ -1569,11 +1553,10 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
Fsub(dst, src, fp_zero);
}
void TurboAssembler::LoadRoot(CPURegister destination,
Heap::RootListIndex index) {
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
// TODO(jbramley): Most root values are constants, and can be synthesized
// without a load. Refer to the ARM back end for details.
Ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2));
Ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
}
@ -1588,14 +1571,14 @@ void MacroAssembler::LoadObject(Register result, Handle<Object> object) {
void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) {
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
LookupConstant(dst, x);
return;
void TurboAssembler::Move(Register dst, Handle<HeapObject> value) {
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadConstant(dst, value);
return;
}
}
#endif // V8_EMBEDDED_BUILTINS
Mov(dst, x);
Mov(dst, value);
}
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
@ -1644,18 +1627,6 @@ void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) {
}
}
void MacroAssembler::AssertFixedArray(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFixedArray);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
CompareObjectType(object, temp, temp, FIXED_ARRAY_TYPE);
Check(eq, AbortReason::kOperandIsNotAFixedArray);
}
}
void MacroAssembler::AssertConstructor(Register object) {
if (emit_debug_code()) {
AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAConstructor);
@ -1726,7 +1697,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
AssertNotSmi(object);
JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
Bind(&done_checking);
}
@ -1765,8 +1736,8 @@ void MacroAssembler::TailCallStub(CodeStub* stub) {
Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
}
void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles) {
void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid,
Register centry) {
const Runtime::Function* f = Runtime::FunctionForId(fid);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
@ -1774,9 +1745,9 @@ void TurboAssembler::CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
// smarter.
Mov(x0, f->nargs);
Mov(x1, ExternalReference::Create(f));
Handle<Code> code =
CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
Call(code, RelocInfo::CODE_TARGET);
DCHECK(!AreAliased(centry, x0, x1));
Add(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag));
Call(centry);
}
void MacroAssembler::CallRuntime(const Runtime::Function* f,
@ -1883,54 +1854,28 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
}
}
#ifdef V8_EMBEDDED_BUILTINS
void TurboAssembler::LookupConstant(Register destination,
Handle<Object> object) {
CHECK(isolate()->ShouldLoadConstantsFromRootList());
CHECK(root_array_available_);
// Ensure the given object is in the builtins constants table and fetch its
// index.
BuiltinsConstantsTableBuilder* builder =
isolate()->builtins_constants_table_builder();
uint32_t index = builder->AddObject(object);
// TODO(jgruber): Load builtins from the builtins table.
// TODO(jgruber): Ensure that code generation can recognize constant targets
// in kArchCallCodeObject.
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
Ldr(destination, FieldMemOperand(destination, FixedArray::kHeaderSize +
index * kPointerSize));
}
void TurboAssembler::LookupExternalReference(Register destination,
ExternalReference reference) {
CHECK(reference.address() !=
ExternalReference::roots_array_start(isolate()).address());
CHECK(isolate()->ShouldLoadConstantsFromRootList());
CHECK(root_array_available_);
// Encode as an index into the external reference table stored on the isolate.
ExternalReferenceEncoder encoder(isolate());
ExternalReferenceEncoder::Value v = encoder.Encode(reference.address());
CHECK(!v.is_from_api());
uint32_t index = v.index();
// Generate code to load from the external reference table.
int32_t roots_to_external_reference_offset =
Heap::roots_to_external_reference_table_offset() +
ExternalReferenceTable::OffsetOfEntry(index);
Ldr(destination,
MemOperand(kRootRegister, roots_to_external_reference_offset));
FieldMemOperand(destination,
FixedArray::kHeaderSize + constant_index * kPointerSize));
}
void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
Ldr(destination, MemOperand(kRootRegister, offset));
}
void TurboAssembler::LoadRootRegisterOffset(Register destination,
intptr_t offset) {
if (offset == 0) {
Move(destination, kRootRegister);
} else {
Add(destination, kRootRegister, Operand(offset));
}
}
#endif // V8_EMBEDDED_BUILTINS
void TurboAssembler::Jump(Register target, Condition cond) {
if (cond == nv) return;
@ -1969,7 +1914,7 @@ static int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
int64_t offset = static_cast<int64_t>(target);
// The target of WebAssembly calls is still an index instead of an actual
// address at this point, and needs to be encoded as-is.
if (rmode != RelocInfo::WASM_CALL) {
if (rmode != RelocInfo::WASM_CALL && rmode != RelocInfo::WASM_STUB_CALL) {
offset -= reinterpret_cast<int64_t>(pc);
DCHECK_EQ(offset % kInstructionSize, 0);
offset = offset / static_cast<int>(kInstructionSize);
@ -1986,18 +1931,38 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Condition cond) {
DCHECK(RelocInfo::IsCodeTarget(rmode));
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
LookupConstant(scratch, code);
Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
Jump(scratch, cond);
return;
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code &&
!Builtins::IsIsolateIndependentBuiltin(*code)) {
// Calls to embedded targets are initially generated as standard
// pc-relative calls below. When creating the embedded blob, call offsets
// are patched up to point directly to the off-heap instruction start.
// Note: It is safe to dereference {code} above since code generation
// for builtins and code stubs happens on the main thread.
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
IndirectLoadConstant(scratch, code);
Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
Jump(scratch, cond);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(scratch, cond);
return;
}
}
}
#endif // V8_EMBEDDED_BUILTINS
if (CanUseNearCallOrJump(rmode)) {
JumpHelper(static_cast<int64_t>(GetCodeTargetIndex(code)), rmode, cond);
JumpHelper(static_cast<int64_t>(AddCodeTarget(code)), rmode, cond);
} else {
Jump(code.address(), rmode, cond);
}
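A condensed sketch of the dispatch the new Jump (and the matching Call below) performs for Handle<Code> targets; the FLAG_embedded_builtins guard is omitted and the types and predicates are stand-ins for the real ones on AssemblerOptions and Builtins.

enum class CodeTargetPath {
  kIndirectViaConstantsTable,  // IndirectLoadConstant + branch via scratch
  kInlineOffHeapTrampoline,    // EmbeddedData::FromBlob() entry point
  kPcRelative                  // AddCodeTarget + near branch, or far address
};

struct OptionsSketch {
  bool isolate_independent_code;
  bool inline_offheap_trampolines;
};

CodeTargetPath ChoosePath(const OptionsSketch& opts, bool root_array_available,
                          bool target_is_isolate_independent_builtin) {
  if (root_array_available && opts.isolate_independent_code &&
      !target_is_isolate_independent_builtin) {
    return CodeTargetPath::kIndirectViaConstantsTable;
  }
  if (opts.inline_offheap_trampolines &&
      target_is_isolate_independent_builtin) {
    return CodeTargetPath::kInlineOffHeapTrampoline;
  }
  return CodeTargetPath::kPcRelative;
}

int main() {
  const OptionsSketch opts{true, true};
  return ChoosePath(opts, true, false) ==
                 CodeTargetPath::kIndirectViaConstantsTable
             ? 0
             : 1;
}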
@ -2045,18 +2010,38 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
Bind(&start_call);
#endif
#ifdef V8_EMBEDDED_BUILTINS
if (root_array_available_ && isolate()->ShouldLoadConstantsFromRootList()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
LookupConstant(scratch, code);
Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
Call(scratch);
return;
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code &&
!Builtins::IsIsolateIndependentBuiltin(*code)) {
// Calls to embedded targets are initially generated as standard
// pc-relative calls below. When creating the embedded blob, call offsets
// are patched up to point directly to the off-heap instruction start.
// Note: It is safe to dereference {code} above since code generation
// for builtins and code stubs happens on the main thread.
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
IndirectLoadConstant(scratch, code);
Add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
Call(scratch);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
return;
}
}
}
#endif // V8_EMBEDDED_BUILTINS
if (CanUseNearCallOrJump(rmode)) {
near_call(GetCodeTargetIndex(code), rmode);
near_call(AddCodeTarget(code), rmode);
} else {
IndirectCall(code.address(), rmode);
}
@ -2087,7 +2072,7 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) {
return is_int26(offset);
}
void TurboAssembler::CallForDeoptimization(Address target,
void TurboAssembler::CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode) {
DCHECK_EQ(rmode, RelocInfo::RUNTIME_ENTRY);
@ -2096,22 +2081,20 @@ void TurboAssembler::CallForDeoptimization(Address target,
Label start_call;
Bind(&start_call);
#endif
// The deoptimizer requires the deoptimization id to be in x16.
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
// Deoptimisation table entries require the call address to be in x16, in
// order to compute the entry id.
// TODO(all): Put the entry id back in the table now that we are using
// a direct branch for the call and do not need to set up x16.
DCHECK(temp.Is(x16));
Mov(temp, Immediate(target, rmode));
// Make sure that the deopt id can be encoded in 16 bits, so can be encoded
// in a single movz instruction with a zero shift.
DCHECK(is_uint16(deopt_id));
movz(temp, deopt_id);
int64_t offset = static_cast<int64_t>(target) -
static_cast<int64_t>(isolate_data().code_range_start_);
static_cast<int64_t>(options().code_range_start);
DCHECK_EQ(offset % kInstructionSize, 0);
offset = offset / static_cast<int>(kInstructionSize);
DCHECK(IsNearCallOffset(offset));
near_call(static_cast<int>(offset), rmode);
near_call(static_cast<int>(offset), RelocInfo::RUNTIME_ENTRY);
#ifdef DEBUG
AssertSizeOfCodeGeneratedSince(&start_call, kNearCallSize + kInstructionSize);
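A standalone sketch tying this hunk to RelocInfo::GetDeoptimizationId earlier in the file: the caller encodes the deopt id as the 16-bit immediate of a single movz into x16, and the deoptimizer later reads it back out of that preceding instruction. The helpers below follow the public A64 MOVZ layout and only mirror V8's Instruction accessors.

#include <cassert>
#include <cstdint>

constexpr uint32_t EncodeMovzX16(uint32_t imm16) {
  return 0xD2800000u | (imm16 << 5) | 16;  // MOVZ x16, #imm16, LSL #0
}

constexpr uint32_t ImmMoveWide(uint32_t instr) { return (instr >> 5) & 0xFFFF; }
constexpr uint32_t ShiftMoveWide(uint32_t instr) { return (instr >> 21) & 0x3; }

int main() {
  const uint32_t deopt_id = 1234;          // must satisfy is_uint16(deopt_id)
  const uint32_t instr = EncodeMovzX16(deopt_id);
  assert(ShiftMoveWide(instr) == 0);       // zero shift, as the DCHECK requires
  assert(ImmMoveWide(instr) == deopt_id);  // what GetDeoptimizationId recovers
  return 0;
}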
@ -2385,9 +2368,9 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target,
// extension to correctly handle it.
Ldr(expected_reg, FieldMemOperand(function,
JSFunction::kSharedFunctionInfoOffset));
Ldrsw(expected_reg,
FieldMemOperand(expected_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
Ldrh(expected_reg,
FieldMemOperand(expected_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount expected(expected_reg);
InvokeFunctionCode(function, new_target, expected, actual, flag);
@ -2436,7 +2419,8 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result,
void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Register result,
DoubleRegister double_input) {
DoubleRegister double_input,
StubCallMode stub_mode) {
Label done;
// Try to convert the double to an int64. If successful, the bottom 32 bits
@ -2447,7 +2431,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
Push(lr, double_input);
// DoubleToI preserves any registers it needs to clobber.
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
} else {
Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
}
Ldr(result, MemOperand(sp, 0));
DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
@ -2467,17 +2455,19 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
UseScratchRegisterScope temps(this);
if (type == StackFrame::INTERNAL) {
Register code_reg = temps.AcquireX();
Move(code_reg, CodeObject());
Register type_reg = temps.AcquireX();
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp, type_reg, code_reg);
Add(fp, sp, InternalFrameConstants::kFixedFrameSizeFromFp);
// sp[4] : lr
// sp[3] : fp
// type_reg pushed twice for alignment.
Push(lr, fp, type_reg, type_reg);
const int kFrameSize =
TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize;
Add(fp, sp, kFrameSize);
// sp[3] : lr
// sp[2] : fp
// sp[1] : type
// sp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
// sp[0] : for alignment
} else if (type == StackFrame::WASM_COMPILED ||
type == StackFrame::WASM_COMPILE_LAZY) {
Register type_reg = temps.AcquireX();
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
@ -2507,15 +2497,10 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
if (type == StackFrame::WASM_COMPILED) {
Mov(sp, fp);
Pop(fp, lr);
} else {
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(sp, fp);
Pop(fp, lr);
}
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(sp, fp);
Pop(fp, lr);
}
@ -3046,12 +3031,13 @@ void TurboAssembler::Abort(AbortReason reason) {
#ifdef DEBUG
RecordComment("Abort message: ");
RecordComment(GetAbortReason(reason));
#endif
if (FLAG_trap_on_abort) {
// Avoid emitting call to builtin if requested.
if (trap_on_abort()) {
Brk(0);
return;
}
#endif
// We need some scratch registers for the MacroAssembler, so make sure we have
// some. This is safe here because Abort never returns.

View File

@ -11,6 +11,7 @@
#include "src/bailout-reason.h"
#include "src/base/bits.h"
#include "src/globals.h"
#include "src/turbo-assembler.h"
// Simulator specific helpers.
#if USE_SIMULATOR
@ -41,24 +42,28 @@ namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
#define kReturnRegister0 x0
#define kReturnRegister1 x1
#define kReturnRegister2 x2
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kAllocateSizeRegister x1
#define kSpeculationPoisonRegister x18
#define kInterpreterAccumulatorRegister x0
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
#define kInterpreterDispatchTableRegister x21
#define kJavaScriptCallArgCountRegister x0
#define kJavaScriptCallCodeStartRegister x2
#define kJavaScriptCallNewTargetRegister x3
#define kOffHeapTrampolineRegister ip0
#define kRuntimeCallFunctionRegister x1
#define kRuntimeCallArgCountRegister x0
#define kWasmInstanceRegister x7
constexpr Register kReturnRegister0 = x0;
constexpr Register kReturnRegister1 = x1;
constexpr Register kReturnRegister2 = x2;
constexpr Register kJSFunctionRegister = x1;
constexpr Register kContextRegister = cp;
constexpr Register kAllocateSizeRegister = x1;
constexpr Register kSpeculationPoisonRegister = x18;
constexpr Register kInterpreterAccumulatorRegister = x0;
constexpr Register kInterpreterBytecodeOffsetRegister = x19;
constexpr Register kInterpreterBytecodeArrayRegister = x20;
constexpr Register kInterpreterDispatchTableRegister = x21;
constexpr Register kJavaScriptCallArgCountRegister = x0;
constexpr Register kJavaScriptCallCodeStartRegister = x2;
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = x3;
constexpr Register kJavaScriptCallExtraArg1Register = x2;
constexpr Register kOffHeapTrampolineRegister = ip0;
constexpr Register kRuntimeCallFunctionRegister = x1;
constexpr Register kRuntimeCallArgCountRegister = x0;
constexpr Register kWasmInstanceRegister = x7;
#define LS_MACRO_LIST(V) \
V(Ldrb, Register&, rt, LDRB_w) \
@ -97,11 +102,6 @@ namespace internal {
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
// Generate a MemOperand for loading a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);
// ----------------------------------------------------------------------------
// MacroAssembler
@ -177,10 +177,13 @@ enum PreShiftImmMode {
kAnyShift // Allow any pre-shift.
};
class TurboAssembler : public Assembler {
class TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
CodeObjectRequired create_code_object);
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
: TurboAssemblerBase(isolate, options, buffer, buffer_size,
create_code_object) {}
// The Abort method should call a V8 runtime function, but the CallRuntime
// mechanism depends on CEntry. If use_real_aborts is false, Abort will
@ -203,16 +206,6 @@ class TurboAssembler : public Assembler {
TurboAssembler* tasm_;
};
void set_has_frame(bool value) { has_frame_ = value; }
bool has_frame() const { return has_frame_; }
Isolate* isolate() const { return isolate_; }
Handle<HeapObject> CodeObject() {
DCHECK(!code_object_.is_null());
return code_object_;
}
#if DEBUG
void set_allow_macro_instructions(bool value) {
allow_macro_instructions_ = value;
@ -264,7 +257,7 @@ class TurboAssembler : public Assembler {
// This is required for compatibility with architecture independent code.
// Remove if not needed.
void Move(Register dst, Register src);
void Move(Register dst, Handle<HeapObject> x);
void Move(Register dst, Handle<HeapObject> value);
void Move(Register dst, Smi* src);
// Register swap. Note that the register operands should be distinct.
@ -573,9 +566,10 @@ class TurboAssembler : public Assembler {
bool AllowThisStubCall(CodeStub* stub);
void CallStubDelayed(CodeStub* stub);
// TODO(jgruber): Remove in favor of MacroAssembler::CallRuntime.
void CallRuntimeDelayed(Zone* zone, Runtime::FunctionId fid,
SaveFPRegsMode save_doubles = kDontSaveFPRegs);
// Call a runtime routine. This expects {centry} to contain a fitting CEntry
// builtin for the target runtime function and uses an indirect call.
void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
// Removes current frame and its arguments from the stack preserving
// the arguments and a return address pushed to the stack for the next call.
@ -587,6 +581,7 @@ class TurboAssembler : public Assembler {
Register scratch1);
inline void SmiUntag(Register dst, Register src);
inline void SmiUntag(Register dst, const MemOperand& src);
inline void SmiUntag(Register smi);
// Calls Abort(msg) if the condition cond is not satisfied.
@ -862,6 +857,9 @@ class TurboAssembler : public Assembler {
inline void JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label = nullptr);
inline void JumpIfEqual(Register x, int32_t y, Label* dest);
inline void JumpIfLessThan(Register x, int32_t y, Label* dest);
inline void Fmov(VRegister fd, VRegister fn);
inline void Fmov(VRegister fd, Register rn);
// Provide explicit double and float interfaces for FP immediate moves, rather
@ -882,11 +880,10 @@ class TurboAssembler : public Assembler {
int shift_amount = 0);
void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
#ifdef V8_EMBEDDED_BUILTINS
void LookupConstant(Register destination, Handle<Object> object);
void LookupExternalReference(Register destination,
ExternalReference reference);
#endif // V8_EMBEDDED_BUILTINS
void LoadFromConstantsTable(Register destination,
int constant_index) override;
void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
void LoadRootRelative(Register destination, int32_t offset) override;
void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
@ -900,7 +897,8 @@ class TurboAssembler : public Assembler {
// Generate an indirect call (for when a direct call's range is not adequate).
void IndirectCall(Address target, RelocInfo::Mode rmode);
void CallForDeoptimization(Address target, RelocInfo::Mode rmode);
void CallForDeoptimization(Address target, int deopt_id,
RelocInfo::Mode rmode);
// For every Call variant, there is a matching CallSize function that returns
// the size (in bytes) of the call sequence.
@ -924,7 +922,7 @@ class TurboAssembler : public Assembler {
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
DoubleRegister double_input);
DoubleRegister double_input, StubCallMode stub_mode);
inline void Mul(const Register& rd, const Register& rn, const Register& rm);
@ -1167,7 +1165,7 @@ class TurboAssembler : public Assembler {
#undef DECLARE_FUNCTION
// Load an object from the root table.
void LoadRoot(CPURegister destination, Heap::RootListIndex index);
void LoadRoot(Register destination, Heap::RootListIndex index) override;
inline void Ret(const Register& xn = lr);
@ -1231,9 +1229,6 @@ class TurboAssembler : public Assembler {
void ResetSpeculationPoisonRegister();
bool root_array_available() const { return root_array_available_; }
void set_root_array_available(bool v) { root_array_available_ = v; }
protected:
// The actual Push and Pop implementations. These don't generate any code
// other than that required for the push or pop. This allows
@ -1266,26 +1261,20 @@ class TurboAssembler : public Assembler {
// have mixed types. The format string (x0) should not be included.
void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
// This handle will be patched with the code object on installation.
Handle<HeapObject> code_object_;
private:
bool has_frame_ = false;
bool root_array_available_ = true;
Isolate* const isolate_;
#if DEBUG
// Tell whether any of the macro instruction can be used. When false the
// MacroAssembler will assert if a method which can emit a variable number
// of instructions is called.
bool allow_macro_instructions_;
bool allow_macro_instructions_ = true;
#endif
// Scratch registers available for use by the MacroAssembler.
CPURegList tmp_list_;
CPURegList fptmp_list_;
CPURegList tmp_list_ = DefaultTmpList();
CPURegList fptmp_list_ = DefaultFPTmpList();
bool use_real_aborts_;
bool use_real_aborts_ = true;
// Helps resolve branching to labels potentially out of range.
// If the label is not bound, it registers the information necessary to later
@ -1314,8 +1303,12 @@ class TurboAssembler : public Assembler {
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size,
CodeObjectRequired create_code_object);
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
// Instruction set functions ------------------------------------------------
// Logical macros.
@ -1716,8 +1709,6 @@ class MacroAssembler : public TurboAssembler {
inline void SmiTag(Register dst, Register src);
inline void SmiTag(Register smi);
inline void SmiUntagToDouble(VRegister dst, Register src);
inline void SmiUntagToFloat(VRegister dst, Register src);
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
inline void JumpIfBothSmi(Register value1, Register value2,
@ -1740,9 +1731,6 @@ class MacroAssembler : public TurboAssembler {
inline void ObjectTag(Register tagged_obj, Register obj);
inline void ObjectUntag(Register untagged_obj, Register obj);
// Abort execution if argument is not a FixedArray, enabled via --debug-code.
void AssertFixedArray(Register object);
// Abort execution if argument is not a Constructor, enabled via --debug-code.
void AssertConstructor(Register object);

View File

@ -3184,7 +3184,7 @@ void Simulator::Debug() {
(strcmp(cmd, "po") == 0)) {
if (argc == 2) {
int64_t value;
OFStream os(stdout);
StdoutStream os;
if (GetValue(arg1, &value)) {
Object* obj = reinterpret_cast<Object*>(value);
os << arg1 << ": \n";
@ -3246,7 +3246,7 @@ void Simulator::Debug() {
current_heap->ContainsSlow(obj->address())) {
PrintF(" (");
if ((value & kSmiTagMask) == 0) {
STATIC_ASSERT(kSmiValueSize == 32);
DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
int32_t untagged = (value >> kSmiShift) & 0xFFFFFFFF;
PrintF("smi %" PRId32, untagged);
} else {

View File

@ -338,10 +338,10 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
base::ElapsedTimer instantiate_timer;
instantiate_timer.Start();
Handle<HeapNumber> uses_bitset(
HeapNumber::cast(wasm_data->get(kWasmDataUsesBitSet)));
HeapNumber::cast(wasm_data->get(kWasmDataUsesBitSet)), isolate);
Handle<WasmModuleObject> module(
WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)));
Handle<Script> script(Script::cast(shared->script()));
WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)), isolate);
Handle<Script> script(Script::cast(shared->script()), isolate);
// TODO(mstarzinger): The position currently points to the module definition
// but should instead point to the instantiation site (more intuitive).
int position = shared->StartPosition();
@ -405,7 +405,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
Handle<Name> single_function_name(
isolate->factory()->InternalizeUtf8String(AsmJs::kSingleFunctionName));
MaybeHandle<Object> single_function =
Object::GetProperty(module_object, single_function_name);
Object::GetProperty(isolate, module_object, single_function_name);
if (!single_function.is_null() &&
!single_function.ToHandleChecked()->IsUndefined(isolate)) {
return single_function;
@ -413,7 +413,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
Handle<String> exports_name =
isolate->factory()->InternalizeUtf8String("exports");
return Object::GetProperty(module_object, exports_name);
return Object::GetProperty(isolate, module_object, exports_name);
}
} // namespace internal

View File

@ -553,7 +553,7 @@ void AsmJsParser::ValidateModuleVarImport(VarInfo* info,
} else {
info->kind = VarKind::kImportedFunction;
info->import = new (zone()->New(sizeof(FunctionImportInfo)))
FunctionImportInfo({name, WasmModuleBuilder::SignatureMap(zone())});
FunctionImportInfo(name, zone());
info->mutable_variable = false;
}
}
@ -2210,14 +2210,14 @@ AsmType* AsmJsParser::ValidateCall() {
DCHECK_NOT_NULL(function_info->import);
// TODO(bradnelson): Factor out.
uint32_t index;
auto it = function_info->import->cache.find(sig);
auto it = function_info->import->cache.find(*sig);
if (it != function_info->import->cache.end()) {
index = it->second;
DCHECK(function_info->function_defined);
} else {
index =
module_builder_->AddImport(function_info->import->function_name, sig);
function_info->import->cache[sig] = index;
function_info->import->cache[*sig] = index;
function_info->function_defined = true;
}
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);

View File

@ -76,9 +76,16 @@ class AsmJsParser {
};
// clang-format on
// A single import in asm.js can require multiple imports in wasm, if the
// function is used with different signatures. {cache} keeps the wasm
// imports for the single asm.js import of name {function_name}.
struct FunctionImportInfo {
Vector<const char> function_name;
WasmModuleBuilder::SignatureMap cache;
ZoneUnorderedMap<FunctionSig, uint32_t> cache;
// Constructor.
FunctionImportInfo(Vector<const char> name, Zone* zone)
: function_name(name), cache(zone) {}
};
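// Illustrative sketch (not part of the diff): the cache above maps a concrete
// function signature to the wasm import index created for it, so one asm.js
// import yields one wasm import per distinct signature. Standalone stand-in
// that keys on a string instead of FunctionSig and counts indices itself.
#include <cstdint>
#include <string>
#include <unordered_map>

struct ImportCacheSketch {
  std::unordered_map<std::string, uint32_t> cache;  // signature -> import index
  uint32_t next_index = 0;

  uint32_t GetOrAddImport(const std::string& signature) {
    auto it = cache.find(signature);
    if (it != cache.end()) return it->second;  // same signature: reuse import
    uint32_t index = next_index++;             // new signature: add an import
    cache.emplace(signature, index);
    return index;
  }
};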
struct VarInfo {

View File

@ -4,6 +4,7 @@
#include "src/asmjs/asm-scanner.h"
#include "src/char-predicates-inl.h"
#include "src/conversions.h"
#include "src/flags.h"
#include "src/parsing/scanner.h"
@ -273,17 +274,22 @@ void AsmJsScanner::ConsumeNumber(uc32 ch) {
std::string number;
number = ch;
bool has_dot = ch == '.';
bool has_prefix = false;
for (;;) {
ch = stream_->Advance();
if ((ch >= '0' && ch <= '9') || (ch >= 'a' && ch <= 'f') ||
(ch >= 'A' && ch <= 'F') || ch == '.' || ch == 'b' || ch == 'o' ||
ch == 'x' ||
((ch == '-' || ch == '+') && (number[number.size() - 1] == 'e' ||
number[number.size() - 1] == 'E'))) {
((ch == '-' || ch == '+') && !has_prefix &&
(number[number.size() - 1] == 'e' ||
number[number.size() - 1] == 'E'))) {
// TODO(bradnelson): Test weird cases ending in -.
if (ch == '.') {
has_dot = true;
}
if (ch == 'b' || ch == 'o' || ch == 'x') {
has_prefix = true;
}
number.push_back(ch);
} else {
break;
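// Illustrative sketch (not part of the diff): why ConsumeNumber now tracks
// has_prefix. After a 0x/0b/0o prefix the letter 'e' is an ordinary digit, so
// a following '+'/'-' must end the number rather than start an exponent:
//   "1e+2"   scans as the single number "1e+2"
//   "0x1e+2" scans as the number "0x1e", then '+', then "2"
// Standalone predicate mirroring the condition above; not the V8 scanner.
#include <string>

bool MayBeExponentSign(const std::string& number, bool has_prefix) {
  if (number.empty() || has_prefix) return false;
  char last = number.back();
  return last == 'e' || last == 'E';
}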
@ -413,16 +419,13 @@ void AsmJsScanner::ConsumeCompareOrShift(uc32 ch) {
}
bool AsmJsScanner::IsIdentifierStart(uc32 ch) {
return (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z') || ch == '_' ||
ch == '$';
return IsInRange(AsciiAlphaToLower(ch), 'a', 'z') || ch == '_' || ch == '$';
}
bool AsmJsScanner::IsIdentifierPart(uc32 ch) {
return IsIdentifierStart(ch) || (ch >= '0' && ch <= '9');
}
bool AsmJsScanner::IsIdentifierPart(uc32 ch) { return IsAsciiIdentifier(ch); }
bool AsmJsScanner::IsNumberStart(uc32 ch) {
return ch == '.' || (ch >= '0' && ch <= '9');
return ch == '.' || IsDecimalDigit(ch);
}
} // namespace internal

View File

@ -43,28 +43,40 @@
#include "src/ostreams.h"
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
AssemblerOptions AssemblerOptions::Default(
Isolate* isolate, bool explicitly_support_serialization) {
AssemblerOptions options;
bool serializer =
isolate->serializer_enabled() || explicitly_support_serialization;
options.record_reloc_info_for_serialization = serializer;
options.enable_root_array_delta_access = !serializer;
#ifdef USE_SIMULATOR
// Don't generate simulator specific code if we are building a snapshot, which
// might be run on real hardware.
options.enable_simulator_code = !serializer;
#endif
options.isolate_independent_code = isolate->ShouldLoadConstantsFromRootList();
options.inline_offheap_trampolines = !serializer;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
options.code_range_start =
isolate->heap()->memory_allocator()->code_range()->start();
#endif
return options;
}
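// Illustrative sketch (not part of the diff): a typical call site for the new
// factory. Passing explicitly_support_serialization = true forces the
// serializer-friendly defaults even when the isolate's serializer is off;
// assumes an Isolate* named isolate is in scope.
AssemblerOptions options = AssemblerOptions::Default(
    isolate, /*explicitly_support_serialization=*/true);
DCHECK(options.record_reloc_info_for_serialization);  // reloc info is recorded
DCHECK(!options.enable_root_array_delta_access);      // no root-array deltas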
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
AssemblerBase::IsolateData::IsolateData(Isolate* isolate)
: serializer_enabled_(isolate->serializer_enabled())
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
,
code_range_start_(
isolate->heap()->memory_allocator()->code_range()->start())
#endif
{
}
AssemblerBase::AssemblerBase(IsolateData isolate_data, void* buffer,
AssemblerBase::AssemblerBase(const AssemblerOptions& options, void* buffer,
int buffer_size)
: isolate_data_(isolate_data),
: options_(options),
enabled_cpu_features_(0),
emit_debug_code_(FLAG_debug_code),
predictable_code_size_(false),
@ -95,7 +107,7 @@ void AssemblerBase::FlushICache(void* start, size_t size) {
}
void AssemblerBase::Print(Isolate* isolate) {
OFStream os(stdout);
StdoutStream os;
v8::internal::Disassembler::Decode(isolate, &os, buffer_, pc_);
}
@ -164,8 +176,7 @@ unsigned CpuFeatures::dcache_line_size_ = 0;
//
// 01: code_target: [6-bit pc delta] 01
//
// 10: short_data_record: [6-bit pc delta] 10 followed by
// [8-bit data delta]
// 10: wasm_stub_call: [6-bit pc delta] 10
//
// 11: long_record [6 bit reloc mode] 11
// followed by pc delta
@ -189,7 +200,7 @@ const int kLongTagBits = 6;
const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
const int kLocatableTag = 2;
const int kWasmStubCallTag = 2;
const int kDefaultTag = 3;
const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
@ -202,54 +213,6 @@ const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
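// Illustrative sketch (not part of the diff): how a short tagged record packs
// a 6-bit pc delta and a 2-bit tag into one byte, per the layout comments
// above (kTagBits == 2 is implied by the 6-bit delta). Standalone bit math.
#include <cassert>
#include <cstdint>

uint8_t PackShortTaggedByte(uint32_t pc_delta, uint32_t tag) {
  constexpr uint32_t kTagBitsSketch = 2;
  assert(pc_delta < (1u << (8 - kTagBitsSketch)));  // must fit the 6-bit field
  assert(tag < (1u << kTagBitsSketch));  // kEmbeddedObjectTag .. kDefaultTag
  return static_cast<uint8_t>((pc_delta << kTagBitsSketch) | tag);
}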
// static
bool RelocInfo::OffHeapTargetIsCodedSpecially() {
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
return false;
#elif defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390)
return true;
#endif
}
void RelocInfo::set_global_handle(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
set_embedded_address(address, icache_flush_mode);
}
Address RelocInfo::wasm_call_address() const {
DCHECK_EQ(rmode_, WASM_CALL);
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_wasm_call_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_CALL);
Assembler::set_target_address_at(pc_, constant_pool_, address,
icache_flush_mode);
}
Address RelocInfo::global_handle() const {
DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
return embedded_address();
}
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
IsCodeTarget(rmode_)) {
Code* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target_code);
}
}
uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
// Return if the pc_delta can fit in kSmallPCDeltaBits bits.
// Otherwise write a variable length PC jump for the bits that do
@ -324,14 +287,15 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteShortTaggedPC(pc_delta, kCodeTargetTag);
DCHECK_LE(begin_pos - pos_, RelocInfo::kMaxCallSize);
} else if (rmode == RelocInfo::DEOPT_REASON) {
DCHECK(rinfo->data() < (1 << kBitsPerByte));
WriteShortTaggedPC(pc_delta, kLocatableTag);
WriteShortData(rinfo->data());
} else if (rmode == RelocInfo::WASM_STUB_CALL) {
WriteShortTaggedPC(pc_delta, kWasmStubCallTag);
} else {
WriteModeAndPC(pc_delta, rmode);
if (RelocInfo::IsComment(rmode)) {
WriteData(rinfo->data());
} else if (RelocInfo::IsDeoptReason(rmode)) {
DCHECK_LT(rinfo->data(), 1 << kBitsPerByte);
WriteShortData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode)) {
@ -412,13 +376,9 @@ void RelocIterator::next() {
} else if (tag == kCodeTargetTag) {
ReadShortTaggedPC();
if (SetMode(RelocInfo::CODE_TARGET)) return;
} else if (tag == kLocatableTag) {
} else if (tag == kWasmStubCallTag) {
ReadShortTaggedPC();
Advance();
if (SetMode(RelocInfo::DEOPT_REASON)) {
ReadShortData();
return;
}
if (SetMode(RelocInfo::WASM_STUB_CALL)) return;
} else {
DCHECK_EQ(tag, kDefaultTag);
RelocInfo::Mode rmode = GetMode();
@ -432,6 +392,12 @@ void RelocIterator::next() {
return;
}
Advance(kIntptrSize);
} else if (RelocInfo::IsDeoptReason(rmode)) {
Advance();
if (SetMode(rmode)) {
ReadShortData();
return;
}
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDeoptId(rmode) ||
@ -461,6 +427,14 @@ RelocIterator::RelocIterator(const CodeReference code_reference, int mode_mask)
code_reference.relocation_end(),
code_reference.relocation_start(), mode_mask) {}
RelocIterator::RelocIterator(EmbeddedData* embedded_data, Code* code,
int mode_mask)
: RelocIterator(
code, embedded_data->InstructionStartOfBuiltin(code->builtin_index()),
code->constant_pool(),
code->relocation_start() + code->relocation_size(),
code->relocation_start(), mode_mask) {}
RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask)
: RelocIterator(nullptr, reinterpret_cast<Address>(desc.buffer), 0,
desc.buffer + desc.buffer_size,
@ -472,9 +446,7 @@ RelocIterator::RelocIterator(Vector<byte> instructions,
int mode_mask)
: RelocIterator(nullptr, reinterpret_cast<Address>(instructions.start()),
const_pool, reloc_info.start() + reloc_info.size(),
reloc_info.start(), mode_mask) {
rinfo_.flags_ = RelocInfo::kInNativeWasmCode;
}
reloc_info.start(), mode_mask) {}
RelocIterator::RelocIterator(Code* host, Address pc, Address constant_pool,
const byte* pos, const byte* end, int mode_mask)
@ -491,12 +463,63 @@ RelocIterator::RelocIterator(Code* host, Address pc, Address constant_pool,
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
// static
bool RelocInfo::OffHeapTargetIsCodedSpecially() {
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
return false;
#elif defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390)
return true;
#endif
}
Address RelocInfo::wasm_call_address() const {
DCHECK_EQ(rmode_, WASM_CALL);
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_wasm_call_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_CALL);
Assembler::set_target_address_at(pc_, constant_pool_, address,
icache_flush_mode);
}
Address RelocInfo::wasm_stub_call_address() const {
DCHECK_EQ(rmode_, WASM_STUB_CALL);
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_wasm_stub_call_address(Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_STUB_CALL);
Assembler::set_target_address_at(pc_, constant_pool_, address,
icache_flush_mode);
}
void RelocInfo::set_target_address(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
IsWasmCall(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
IsCodeTargetMode(rmode_)) {
Code* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target_code);
}
}
#ifdef DEBUG
bool RelocInfo::RequiresRelocation(const CodeDesc& desc) {
// Ensure there are no code targets or embedded objects present in the
// deoptimization entries, they would require relocation after code
// generation.
int mode_mask = RelocInfo::kCodeTargetMask |
int mode_mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET) |
RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
RelocInfo::kApplyMask;
RelocIterator it(desc, mode_mask);
@ -513,6 +536,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "embedded object";
case CODE_TARGET:
return "code target";
case RELATIVE_CODE_TARGET:
return "relative code target";
case RUNTIME_ENTRY:
return "runtime entry";
case COMMENT:
@ -537,12 +562,10 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "constant pool";
case VENEER_POOL:
return "veneer pool";
case WASM_GLOBAL_HANDLE:
return "global handle";
case WASM_CALL:
return "internal wasm call";
case WASM_CODE_TABLE_ENTRY:
return "wasm code table entry";
case WASM_STUB_CALL:
return "wasm stub call";
case JS_TO_WASM_CALL:
return "js to wasm call";
case NUMBER_OF_MODES:
@ -564,33 +587,32 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
} else if (rmode_ == EMBEDDED_OBJECT) {
os << " (" << Brief(target_object()) << ")";
} else if (rmode_ == EXTERNAL_REFERENCE) {
ExternalReferenceEncoder ref_encoder(isolate);
os << " ("
<< ref_encoder.NameOfAddress(isolate, target_external_reference())
<< ") (" << reinterpret_cast<const void*>(target_external_reference())
<< ")";
} else if (IsCodeTarget(rmode_)) {
const Address code_target = target_address();
if (flags_ & kInNativeWasmCode) {
os << " (wasm trampoline) ";
} else {
Code* code = Code::GetCodeFromTargetAddress(code_target);
DCHECK(code->IsCode());
os << " (" << Code::Kind2String(code->kind());
if (Builtins::IsBuiltin(code)) {
os << " " << Builtins::name(code->builtin_index());
} else if (code->kind() == Code::STUB) {
os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
}
os << ") ";
if (isolate) {
ExternalReferenceEncoder ref_encoder(isolate);
os << " ("
<< ref_encoder.NameOfAddress(isolate, target_external_reference())
<< ") ";
}
os << " (" << reinterpret_cast<const void*>(target_address()) << ")";
os << " (" << reinterpret_cast<const void*>(target_external_reference())
<< ")";
} else if (IsCodeTargetMode(rmode_)) {
const Address code_target = target_address();
Code* code = Code::GetCodeFromTargetAddress(code_target);
DCHECK(code->IsCode());
os << " (" << Code::Kind2String(code->kind());
if (Builtins::IsBuiltin(code)) {
os << " " << Builtins::name(code->builtin_index());
} else if (code->kind() == Code::STUB) {
os << " " << CodeStub::MajorName(CodeStub::GetMajorKey(code));
}
os << ") (" << reinterpret_cast<const void*>(target_address()) << ")";
} else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
// Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
isolate, target_address(), Deoptimizer::EAGER);
if (id != Deoptimizer::kNotDeoptimizationEntry) {
os << " (deoptimization bailout " << id << ")";
// Deoptimization bailouts are stored as runtime entries.
DeoptimizeKind type;
if (Deoptimizer::IsDeoptimizationEntry(isolate, target_address(), &type)) {
int id = GetDeoptimizationId(isolate, type);
os << " (" << Deoptimizer::MessageFor(type) << " deoptimization bailout "
<< id << ")";
}
} else if (IsConstPool(rmode_)) {
os << " (size " << static_cast<int>(data_) << ")";
@ -604,9 +626,10 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
void RelocInfo::Verify(Isolate* isolate) {
switch (rmode_) {
case EMBEDDED_OBJECT:
Object::VerifyPointer(target_object());
Object::VerifyPointer(isolate, target_object());
break;
case CODE_TARGET: {
case CODE_TARGET:
case RELATIVE_CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
CHECK_NE(addr, kNullAddress);
@ -641,10 +664,9 @@ void RelocInfo::Verify(Isolate* isolate) {
case DEOPT_ID:
case CONST_POOL:
case VENEER_POOL:
case WASM_GLOBAL_HANDLE:
case WASM_CALL:
case WASM_STUB_CALL:
case JS_TO_WASM_CALL:
case WASM_CODE_TABLE_ENTRY:
case NONE:
break;
case NUMBER_OF_MODES:
@ -888,10 +910,35 @@ void Assembler::DataAlign(int m) {
}
}
void Assembler::RequestHeapObject(HeapObjectRequest request) {
void AssemblerBase::RequestHeapObject(HeapObjectRequest request) {
request.set_offset(pc_offset());
heap_object_requests_.push_front(request);
}
int AssemblerBase::AddCodeTarget(Handle<Code> target) {
int current = static_cast<int>(code_targets_.size());
if (current > 0 && !target.is_null() &&
code_targets_.back().address() == target.address()) {
// Optimization if we keep jumping to the same code target.
return current - 1;
} else {
code_targets_.push_back(target);
return current;
}
}
Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
DCHECK_LE(0, code_target_index);
DCHECK_LT(code_target_index, code_targets_.size());
return code_targets_[code_target_index];
}
void AssemblerBase::UpdateCodeTarget(intptr_t code_target_index,
Handle<Code> code) {
DCHECK_LE(0, code_target_index);
DCHECK_LT(code_target_index, code_targets_.size());
code_targets_[code_target_index] = code;
}
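// Illustrative sketch (not part of the diff): AddCodeTarget above only
// collapses immediate repeats of the same target (e.g. back-to-back near calls
// to one builtin); distinct targets each get their own slot. Standalone
// analogue over ints instead of Handle<Code>.
#include <vector>

int AddTargetSketch(std::vector<int>* targets, int target) {
  int current = static_cast<int>(targets->size());
  if (current > 0 && targets->back() == target) {
    return current - 1;        // same target as the last call: reuse its index
  }
  targets->push_back(target);  // otherwise append and hand out the new index
  return current;
}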
} // namespace internal
} // namespace v8

View File

@ -60,6 +60,7 @@ class ApiFunction;
namespace internal {
// Forward declarations.
class EmbeddedData;
class InstructionStream;
class Isolate;
class SCTableReference;
@ -78,12 +79,57 @@ class JumpOptimizationInfo {
bool is_optimizable() const { return optimizable_; }
void set_optimizable() { optimizable_ = true; }
// Used to verify the instruction sequence is always the same in two stages.
size_t hash_code() const { return hash_code_; }
void set_hash_code(size_t hash_code) { hash_code_ = hash_code; }
std::vector<uint32_t>& farjmp_bitmap() { return farjmp_bitmap_; }
private:
enum { kCollection, kOptimization } stage_ = kCollection;
bool optimizable_ = false;
std::vector<uint32_t> farjmp_bitmap_;
size_t hash_code_ = 0u;
};
class HeapObjectRequest {
public:
explicit HeapObjectRequest(double heap_number, int offset = -1);
explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
enum Kind { kHeapNumber, kCodeStub };
Kind kind() const { return kind_; }
double heap_number() const {
DCHECK_EQ(kind(), kHeapNumber);
return value_.heap_number;
}
CodeStub* code_stub() const {
DCHECK_EQ(kind(), kCodeStub);
return value_.code_stub;
}
// The code buffer offset at the time of the request.
int offset() const {
DCHECK_GE(offset_, 0);
return offset_;
}
void set_offset(int offset) {
DCHECK_LT(offset_, 0);
offset_ = offset;
DCHECK_GE(offset_, 0);
}
private:
Kind kind_;
union {
double heap_number;
CodeStub* code_stub;
} value_;
int offset_;
};
// -----------------------------------------------------------------------------
@ -91,26 +137,46 @@ class JumpOptimizationInfo {
enum class CodeObjectRequired { kNo, kYes };
struct V8_EXPORT_PRIVATE AssemblerOptions {
// Recording reloc info for external references and off-heap targets is
// needed whenever code is serialized, e.g. into the snapshot or as a WASM
// module. This flag allows this reloc info to be disabled for code that
// will not survive process destruction.
bool record_reloc_info_for_serialization = true;
// Recording reloc info can be disabled wholesale. This is needed when the
// assembler is used on existing code directly (e.g. JumpTableAssembler)
// without any buffer to hold reloc information.
bool disable_reloc_info_for_patching = false;
// Enables access to exrefs by computing a delta from the root array.
// Only valid if code will not survive the process.
bool enable_root_array_delta_access = false;
// Enables specific assembler sequences only used for the simulator.
bool enable_simulator_code = false;
// Enables use of isolate-independent constants, indirected through the
// root array.
// (macro assembler feature).
bool isolate_independent_code = false;
// Enables the use of isolate-independent builtins through an off-heap
// trampoline. (macro assembler feature).
bool inline_offheap_trampolines = false;
// On some platforms, all code is within a given range in the process,
// and the start of this range is configured here.
Address code_range_start = 0;
// Enable pc-relative calls/jumps on platforms that support it. When setting
// this flag, the code range must be small enough to fit all offsets into
// the instruction immediates.
bool use_pc_relative_calls_and_jumps = false;
class AssemblerBase: public Malloced {
static AssemblerOptions Default(
Isolate* isolate, bool explicitly_support_serialization = false);
};
class AssemblerBase : public Malloced {
public:
struct IsolateData {
explicit IsolateData(Isolate* isolate);
IsolateData(const IsolateData&) = default;
bool serializer_enabled_;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
Address code_range_start_;
#endif
};
AssemblerBase(IsolateData isolate_data, void* buffer, int buffer_size);
AssemblerBase(const AssemblerOptions& options, void* buffer, int buffer_size);
virtual ~AssemblerBase();
IsolateData isolate_data() const { return isolate_data_; }
bool serializer_enabled() const { return isolate_data_.serializer_enabled_; }
void enable_serializer() { isolate_data_.serializer_enabled_ = true; }
const AssemblerOptions& options() const { return options_; }
bool emit_debug_code() const { return emit_debug_code_; }
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
@ -167,12 +233,30 @@ class AssemblerBase: public Malloced {
return FlushICache(reinterpret_cast<void*>(start), size);
}
// Used to print the name of some special registers.
static const char* GetSpecialRegisterName(int code) { return "UNKNOWN"; }
protected:
// Add 'target' to the {code_targets_} vector, if necessary, and return the
// offset at which it is stored.
int AddCodeTarget(Handle<Code> target);
Handle<Code> GetCodeTarget(intptr_t code_target_index) const;
// Updates the code target at {code_target_index} to {target}.
void UpdateCodeTarget(intptr_t code_target_index, Handle<Code> target);
// Reserves space in the code target vector.
void ReserveCodeTargetSpace(size_t num_of_code_targets) {
code_targets_.reserve(num_of_code_targets);
}
// The buffer into which code and relocation info are generated. It could
// either be owned by the assembler or be provided externally.
byte* buffer_;
int buffer_size_;
bool own_buffer_;
std::forward_list<HeapObjectRequest> heap_object_requests_;
// The program counter, which points into the buffer above and moves forward.
// TODO(jkummerow): This should probably have type {Address}.
byte* pc_;
void set_constant_pool_available(bool available) {
if (FLAG_enable_embedded_constant_pool) {
@ -183,12 +267,23 @@ class AssemblerBase: public Malloced {
}
}
// The program counter, which points into the buffer above and moves forward.
// TODO(jkummerow): This should probably have type {Address}.
byte* pc_;
// {RequestHeapObject} records the need for a future heap number allocation or
// code stub generation. After code assembly, each platform's
// {Assembler::AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request).
void RequestHeapObject(HeapObjectRequest request);
private:
IsolateData isolate_data_;
// Before we copy code into the code space, we sometimes cannot encode
// call/jump code targets as we normally would, as the difference between the
// instruction's location in the temporary buffer and the call target is not
// guaranteed to fit in the instruction's offset field. We keep track of the
// code handles we encounter in calls in this vector, and encode the index of
// the code handle in the vector instead.
std::vector<Handle<Code>> code_targets_;
const AssemblerOptions options_;
uint64_t enabled_cpu_features_;
bool emit_debug_code_;
bool predictable_code_size_;
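// Illustrative sketch (not part of the diff): the request/patch lifecycle
// behind RequestHeapObject and heap_object_requests_. The request is stamped
// with the pc offset at which the placeholder was emitted; after assembly the
// platform allocates the object and patches the code at that offset.
// Standalone analogue with a plain struct instead of HeapObjectRequest.
#include <forward_list>

struct HeapNumberRequestSketch {
  double heap_number;
  int offset = -1;  // pc offset of the placeholder, filled in when recorded
};

void RecordRequestSketch(std::forward_list<HeapNumberRequestSketch>* requests,
                         int pc_offset, double value) {
  HeapNumberRequestSketch request{value};
  request.offset = pc_offset;
  requests->push_front(request);  // mirrors heap_object_requests_.push_front
}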
@ -340,12 +435,6 @@ enum ICacheFlushMode { FLUSH_ICACHE_IF_NEEDED, SKIP_ICACHE_FLUSH };
class RelocInfo {
public:
enum Flag : uint8_t {
kNoFlags = 0,
kInNativeWasmCode = 1u << 0, // Reloc info belongs to native wasm code.
};
typedef base::Flags<Flag> Flags;
// This string is used to add padding comments to the reloc info in cases
// where we are not sure to have enough space for patching in during
// lazy deoptimization. This is the case if we have indirect calls for which
@ -363,12 +452,16 @@ class RelocInfo {
static const int kMaxSmallPCDelta;
enum Mode : int8_t {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
// Please note the order is important (see IsRealRelocMode, IsGCRelocMode,
// and IsShareableRelocMode predicates below).
CODE_TARGET,
EMBEDDED_OBJECT,
WASM_GLOBAL_HANDLE,
WASM_CALL,
RELATIVE_CODE_TARGET, // LAST_CODE_TARGET_MODE
EMBEDDED_OBJECT, // LAST_GCED_ENUM
JS_TO_WASM_CALL,
WASM_CALL, // FIRST_SHAREABLE_RELOC_MODE
WASM_STUB_CALL,
RUNTIME_ENTRY,
COMMENT,
@ -396,32 +489,43 @@ class RelocInfo {
// cannot be encoded as part of another record.
PC_JUMP,
// Points to a wasm code table entry.
WASM_CODE_TABLE_ENTRY,
// Pseudo-types
NUMBER_OF_MODES,
NONE, // never recorded value
LAST_CODE_TARGET_MODE = RELATIVE_CODE_TARGET,
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
LAST_CODE_ENUM = CODE_TARGET,
LAST_GCED_ENUM = EMBEDDED_OBJECT,
FIRST_SHAREABLE_RELOC_MODE = RUNTIME_ENTRY,
FIRST_SHAREABLE_RELOC_MODE = WASM_CALL,
};
STATIC_ASSERT(NUMBER_OF_MODES <= kBitsPerInt);
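// Illustrative sketch (not part of the diff): the ordering invariants the
// range predicates below rely on, written as asserts that could sit next to
// the one above. They use only names declared in the enum itself.
STATIC_ASSERT(CODE_TARGET < RELATIVE_CODE_TARGET);
STATIC_ASSERT(RELATIVE_CODE_TARGET == LAST_CODE_TARGET_MODE);
STATIC_ASSERT(EMBEDDED_OBJECT == LAST_GCED_ENUM);
STATIC_ASSERT(FIRST_REAL_RELOC_MODE == CODE_TARGET);
STATIC_ASSERT(WASM_CALL == FIRST_SHAREABLE_RELOC_MODE);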
RelocInfo() = default;
RelocInfo(Address pc, Mode rmode, intptr_t data, Code* host)
: pc_(pc), rmode_(rmode), data_(data), host_(host) {}
RelocInfo(Address pc, Mode rmode, intptr_t data, Code* host,
Address constant_pool = kNullAddress)
: pc_(pc),
rmode_(rmode),
data_(data),
host_(host),
constant_pool_(constant_pool) {}
static inline bool IsRealRelocMode(Mode mode) {
return mode >= FIRST_REAL_RELOC_MODE && mode <= LAST_REAL_RELOC_MODE;
}
static inline bool IsCodeTarget(Mode mode) {
return mode <= LAST_CODE_ENUM;
// Is the relocation mode affected by GC?
static inline bool IsGCRelocMode(Mode mode) { return mode <= LAST_GCED_ENUM; }
static inline bool IsShareableRelocMode(Mode mode) {
return mode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE;
}
static inline bool IsCodeTarget(Mode mode) { return mode == CODE_TARGET; }
static inline bool IsCodeTargetMode(Mode mode) {
return mode <= LAST_CODE_TARGET_MODE;
}
static inline bool IsRelativeCodeTarget(Mode mode) {
return mode == RELATIVE_CODE_TARGET;
}
static inline bool IsEmbeddedObject(Mode mode) {
return mode == EMBEDDED_OBJECT;
@ -430,9 +534,8 @@ class RelocInfo {
return mode == RUNTIME_ENTRY;
}
static inline bool IsWasmCall(Mode mode) { return mode == WASM_CALL; }
// Is the relocation mode affected by GC?
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
static inline bool IsWasmStubCall(Mode mode) {
return mode == WASM_STUB_CALL;
}
static inline bool IsComment(Mode mode) {
return mode == COMMENT;
@ -469,8 +572,11 @@ class RelocInfo {
return IsWasmPtrReference(mode);
}
static inline bool IsWasmPtrReference(Mode mode) {
return mode == WASM_GLOBAL_HANDLE || mode == WASM_CALL ||
mode == JS_TO_WASM_CALL;
return mode == WASM_CALL || mode == JS_TO_WASM_CALL;
}
static inline bool IsOnlyForSerializer(Mode mode) {
return mode == EXTERNAL_REFERENCE || mode == OFF_HEAP_TARGET;
}
static constexpr int ModeMask(Mode mode) { return 1 << mode; }
@ -481,15 +587,12 @@ class RelocInfo {
intptr_t data() const { return data_; }
Code* host() const { return host_; }
Address constant_pool() const { return constant_pool_; }
void set_constant_pool(Address constant_pool) {
constant_pool_ = constant_pool;
}
// Apply a relocation by delta bytes. When the code object is moved, PC
// relative addresses have to be updated as well as absolute addresses
// inside the code (internal references).
// Do not forget to flush the icache afterwards!
INLINE(void apply(intptr_t delta));
V8_INLINE void apply(intptr_t delta);
// Is the pointer this relocation info refers to coded like a plain pointer
// or is it strange in some way (e.g. relative or patched into a series of
@ -504,50 +607,55 @@ class RelocInfo {
// constant pool, otherwise the pointer is embedded in the instruction stream.
bool IsInConstantPool();
Address global_handle() const;
Address js_to_wasm_address() const;
// Returns the deoptimization id for the entry associated with the reloc info
// where {kind} is the deoptimization kind.
// This is only used for printing RUNTIME_ENTRY relocation info.
int GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind);
Address wasm_call_address() const;
Address wasm_stub_call_address() const;
Address js_to_wasm_address() const;
uint32_t wasm_call_tag() const;
void set_wasm_call_address(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_wasm_stub_call_address(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_js_to_wasm_address(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_target_address(
Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_global_handle(Address address, ICacheFlushMode icache_flush_mode =
FLUSH_ICACHE_IF_NEEDED);
void set_wasm_call_address(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_js_to_wasm_address(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
INLINE(Address target_address());
INLINE(HeapObject* target_object());
INLINE(Handle<HeapObject> target_object_handle(Assembler* origin));
INLINE(void set_target_object(
HeapObject* target,
V8_INLINE Address target_address();
V8_INLINE HeapObject* target_object();
V8_INLINE Handle<HeapObject> target_object_handle(Assembler* origin);
V8_INLINE void set_target_object(
Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Address target_runtime_entry(Assembler* origin));
INLINE(void set_target_runtime_entry(
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
V8_INLINE Address target_runtime_entry(Assembler* origin);
V8_INLINE void set_target_runtime_entry(
Address target,
WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(Address target_off_heap_target());
INLINE(Cell* target_cell());
INLINE(Handle<Cell> target_cell_handle());
INLINE(void set_target_cell(
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
V8_INLINE Address target_off_heap_target();
V8_INLINE Cell* target_cell();
V8_INLINE Handle<Cell> target_cell_handle();
V8_INLINE void set_target_cell(
Cell* cell, WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(void set_wasm_code_table_entry(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(void set_target_external_reference(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
V8_INLINE void set_target_external_reference(
Address, ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// Returns the address of the constant pool entry where the target address
// is held. This should only be called if IsInConstantPool returns true.
INLINE(Address constant_pool_entry_address());
V8_INLINE Address constant_pool_entry_address();
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
@ -555,7 +663,7 @@ class RelocInfo {
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target. Architecture-independent code shouldn't
// dereference the pointer it gets back from this.
INLINE(Address target_address_address());
V8_INLINE Address target_address_address();
// This indicates how much space a target takes up when deserializing a code
// stream. For most architectures this is just the size of a pointer. For
@ -566,23 +674,23 @@ class RelocInfo {
// should return the end of the instructions to be patched, allowing the
// deserializer to deserialize the instructions as raw bytes and put them in
// place, ready to be patched with the target.
INLINE(int target_address_size());
V8_INLINE int target_address_size();
// Read the reference in the instruction this relocation
// applies to; can only be called if rmode_ is EXTERNAL_REFERENCE.
INLINE(Address target_external_reference());
V8_INLINE Address target_external_reference();
// Read the reference in the instruction this relocation
// applies to; can only be called if rmode_ is INTERNAL_REFERENCE.
INLINE(Address target_internal_reference());
V8_INLINE Address target_internal_reference();
// Return the reference address this relocation applies to;
// can only be called if rmode_ is INTERNAL_REFERENCE.
INLINE(Address target_internal_reference_address());
V8_INLINE Address target_internal_reference_address();
// Wipe out a relocation to a fixed value, used for making snapshots
// reproducible.
INLINE(void WipeOut());
V8_INLINE void WipeOut();
template <typename ObjectVisitor>
inline void Visit(ObjectVisitor* v);
@ -602,16 +710,9 @@ class RelocInfo {
void Verify(Isolate* isolate);
#endif
static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
void set_embedded_address(Address address, ICacheFlushMode flush_mode);
void set_embedded_size(uint32_t size, ICacheFlushMode flush_mode);
uint32_t embedded_size() const;
Address embedded_address() const;
// On ARM/ARM64, note that pc_ is the address of the instruction referencing
// the constant pool and not the address of the constant pool entry.
Address pc_;
@ -619,7 +720,6 @@ class RelocInfo {
intptr_t data_ = 0;
Code* host_;
Address constant_pool_ = kNullAddress;
Flags flags_;
friend class RelocIterator;
};
@ -679,6 +779,8 @@ class RelocIterator: public Malloced {
// Relocation information with mode k is included in the
// iteration iff bit k of mode_mask is set.
explicit RelocIterator(Code* code, int mode_mask = -1);
explicit RelocIterator(EmbeddedData* embedded_data, Code* code,
int mode_mask);
explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
explicit RelocIterator(const CodeReference code_reference,
int mode_mask = -1);
@ -877,46 +979,6 @@ class ConstantPoolBuilder BASE_EMBEDDED {
PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};
class HeapObjectRequest {
public:
explicit HeapObjectRequest(double heap_number, int offset = -1);
explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
enum Kind { kHeapNumber, kCodeStub };
Kind kind() const { return kind_; }
double heap_number() const {
DCHECK_EQ(kind(), kHeapNumber);
return value_.heap_number;
}
CodeStub* code_stub() const {
DCHECK_EQ(kind(), kCodeStub);
return value_.code_stub;
}
// The code buffer offset at the time of the request.
int offset() const {
DCHECK_GE(offset_, 0);
return offset_;
}
void set_offset(int offset) {
DCHECK_LT(offset_, 0);
offset_ = offset;
DCHECK_GE(offset_, 0);
}
private:
Kind kind_;
union {
double heap_number;
CodeStub* code_stub;
} value_;
int offset_;
};
// Base type for CPU Registers.
//
// 1) We would prefer to use an enum for registers, but enum values are

View File

@ -41,7 +41,7 @@ class AstTraversalVisitor : public AstVisitor<Subclass> {
// Iteration left-to-right.
void VisitDeclarations(Declaration::List* declarations);
void VisitStatements(ZoneList<Statement*>* statements);
void VisitStatements(ZonePtrList<Statement>* statements);
// Individual nodes
#define DECLARE_VISIT(type) void Visit##type(type* node);
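// Illustrative sketch (not part of the diff): the ZoneList<T*> ->
// ZonePtrList<T> renames throughout this file read as an alias for a
// zone-allocated list of pointers. The exact definition is assumed here, but
// an alias of this shape would make the two spellings interchangeable:
template <typename T>
using ZonePtrList = ZoneList<T*>;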
@ -112,7 +112,7 @@ void AstTraversalVisitor<Subclass>::VisitDeclarations(
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitStatements(
ZoneList<Statement*>* stmts) {
ZonePtrList<Statement>* stmts) {
for (int i = 0; i < stmts->length(); ++i) {
Statement* stmt = stmts->at(i);
RECURSE(Visit(stmt));
@ -198,14 +198,14 @@ void AstTraversalVisitor<Subclass>::VisitSwitchStatement(
PROCESS_NODE(stmt);
RECURSE(Visit(stmt->tag()));
ZoneList<CaseClause*>* clauses = stmt->cases();
ZonePtrList<CaseClause>* clauses = stmt->cases();
for (int i = 0; i < clauses->length(); ++i) {
CaseClause* clause = clauses->at(i);
if (!clause->is_default()) {
Expression* label = clause->label();
RECURSE(Visit(label));
}
ZoneList<Statement*>* stmts = clause->statements();
ZonePtrList<Statement>* stmts = clause->statements();
RECURSE(VisitStatements(stmts));
}
}
@ -330,7 +330,7 @@ void AstTraversalVisitor<Subclass>::VisitRegExpLiteral(RegExpLiteral* expr) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitObjectLiteral(ObjectLiteral* expr) {
PROCESS_EXPRESSION(expr);
ZoneList<ObjectLiteralProperty*>* props = expr->properties();
ZonePtrList<ObjectLiteralProperty>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
ObjectLiteralProperty* prop = props->at(i);
RECURSE_EXPRESSION(Visit(prop->key()));
@ -341,7 +341,7 @@ void AstTraversalVisitor<Subclass>::VisitObjectLiteral(ObjectLiteral* expr) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitArrayLiteral(ArrayLiteral* expr) {
PROCESS_EXPRESSION(expr);
ZoneList<Expression*>* values = expr->values();
ZonePtrList<Expression>* values = expr->values();
for (int i = 0; i < values->length(); ++i) {
Expression* value = values->at(i);
RECURSE_EXPRESSION(Visit(value));
@ -404,7 +404,7 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCall(Call* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
ZonePtrList<Expression>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE_EXPRESSION(Visit(arg));
@ -415,7 +415,7 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCallNew(CallNew* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
ZoneList<Expression*>* args = expr->arguments();
ZonePtrList<Expression>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE_EXPRESSION(Visit(arg));
@ -425,7 +425,7 @@ void AstTraversalVisitor<Subclass>::VisitCallNew(CallNew* expr) {
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitCallRuntime(CallRuntime* expr) {
PROCESS_EXPRESSION(expr);
ZoneList<Expression*>* args = expr->arguments();
ZonePtrList<Expression>* args = expr->arguments();
for (int i = 0; i < args->length(); ++i) {
Expression* arg = args->at(i);
RECURSE_EXPRESSION(Visit(arg));
@ -487,7 +487,7 @@ void AstTraversalVisitor<Subclass>::VisitClassLiteral(ClassLiteral* expr) {
if (expr->instance_fields_initializer_function() != nullptr) {
RECURSE_EXPRESSION(Visit(expr->instance_fields_initializer_function()));
}
ZoneList<ClassLiteralProperty*>* props = expr->properties();
ZonePtrList<ClassLiteral::Property>* props = expr->properties();
for (int i = 0; i < props->length(); ++i) {
ClassLiteralProperty* prop = props->at(i);
if (!prop->key()->IsLiteral()) {
@ -501,7 +501,7 @@ template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitInitializeClassFieldsStatement(
InitializeClassFieldsStatement* stmt) {
PROCESS_NODE(stmt);
ZoneList<ClassLiteralProperty*>* props = stmt->fields();
ZonePtrList<ClassLiteral::Property>* props = stmt->fields();
for (int i = 0; i < props->length(); ++i) {
ClassLiteralProperty* prop = props->at(i);
if (!prop->key()->IsLiteral()) {

View File

@ -182,7 +182,7 @@ std::forward_list<const AstRawString*> AstConsString::ToRawStrings() const {
return result;
}
AstStringConstants::AstStringConstants(Isolate* isolate, uint32_t hash_seed)
AstStringConstants::AstStringConstants(Isolate* isolate, uint64_t hash_seed)
: zone_(isolate->allocator(), ZONE_NAME),
string_table_(AstRawString::Compare),
hash_seed_(hash_seed) {

View File

@ -240,14 +240,14 @@ class AstBigInt {
class AstStringConstants final {
public:
AstStringConstants(Isolate* isolate, uint32_t hash_seed);
AstStringConstants(Isolate* isolate, uint64_t hash_seed);
#define F(name, str) \
const AstRawString* name##_string() const { return name##_string_; }
AST_STRING_CONSTANTS(F)
#undef F
uint32_t hash_seed() const { return hash_seed_; }
uint64_t hash_seed() const { return hash_seed_; }
const base::CustomMatcherHashMap* string_table() const {
return &string_table_;
}
@ -255,7 +255,7 @@ class AstStringConstants final {
private:
Zone zone_;
base::CustomMatcherHashMap string_table_;
uint32_t hash_seed_;
uint64_t hash_seed_;
#define F(name, str) AstRawString* name##_string_;
AST_STRING_CONSTANTS(F)
@ -267,7 +267,7 @@ class AstStringConstants final {
class AstValueFactory {
public:
AstValueFactory(Zone* zone, const AstStringConstants* string_constants,
uint32_t hash_seed)
uint64_t hash_seed)
: string_table_(string_constants->string_table()),
strings_(nullptr),
strings_end_(&strings_),
@ -354,7 +354,7 @@ class AstValueFactory {
Zone* zone_;
uint32_t hash_seed_;
uint64_t hash_seed_;
};
} // namespace internal
} // namespace v8

121
deps/v8/src/ast/ast.cc vendored
View File

@ -7,7 +7,6 @@
#include <cmath> // For isfinite.
#include <vector>
#include "src/ast/compile-time-value.h"
#include "src/ast/prettyprinter.h"
#include "src/ast/scopes.h"
#include "src/base/hashmap.h"
@ -19,6 +18,7 @@
#include "src/double.h"
#include "src/elements.h"
#include "src/objects-inl.h"
#include "src/objects/literal-objects-inl.h"
#include "src/objects/literal-objects.h"
#include "src/objects/map.h"
#include "src/property-details.h"
@ -114,6 +114,13 @@ bool Expression::IsTheHoleLiteral() const {
return IsLiteral() && AsLiteral()->type() == Literal::kTheHole;
}
bool Expression::IsCompileTimeValue() {
if (IsLiteral()) return true;
MaterializedLiteral* literal = AsMaterializedLiteral();
if (literal == nullptr) return false;
return literal->IsSimple();
}
bool Expression::IsUndefinedLiteral() const {
if (IsLiteral() && AsLiteral()->type() == Literal::kUndefined) return true;
@ -334,8 +341,7 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
bool ObjectLiteral::Property::IsCompileTimeValue() const {
return kind_ == CONSTANT ||
(kind_ == MATERIALIZED_LITERAL &&
CompileTimeValue::IsCompileTimeValue(value_));
(kind_ == MATERIALIZED_LITERAL && value_->IsCompileTimeValue());
}
@ -360,19 +366,37 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) {
Literal* literal = property->key()->AsLiteral();
DCHECK(!literal->IsNullLiteral());
// If there is an existing entry do not emit a store unless the previous
// entry was also an accessor.
uint32_t hash = literal->Hash();
ZoneHashMap::Entry* entry = table.LookupOrInsert(literal, hash, allocator);
if (entry->value != nullptr) {
auto previous_kind =
if (entry->value == nullptr) {
entry->value = property;
} else {
// We already have a later definition of this property, so we don't need
// to emit a store for the current one.
//
// There are two subtleties here.
//
// (1) Emitting a store might actually be incorrect. For example, in {get
// foo() {}, foo: 42}, the getter store would override the data property
// (which, being a non-computed compile-time valued property, is already
// part of the initial literal object.
//
// (2) If the later definition is an accessor (say, a getter), and the
// current definition is a complementary accessor (here, a setter), then
// we still must emit a store for the current definition.
auto later_kind =
static_cast<ObjectLiteral::Property*>(entry->value)->kind();
if (!((property->kind() == GETTER && previous_kind == SETTER) ||
(property->kind() == SETTER && previous_kind == GETTER))) {
bool complementary_accessors =
(property->kind() == GETTER && later_kind == SETTER) ||
(property->kind() == SETTER && later_kind == GETTER);
if (!complementary_accessors) {
property->set_emit_store(false);
if (later_kind == GETTER || later_kind == SETTER) {
entry->value = property;
}
}
}
entry->value = property;
}
}
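
To make the keep-last rule and the complementary-accessor exception described in the comment above easier to follow, here is a minimal stand-alone sketch in plain C++. It uses std::unordered_map and a back-to-front walk in place of V8's Zone hash table and Property nodes; the reverse walk is assumed from the "later definition" wording in the hunk, and all names are illustrative rather than the literal V8 code.

```cpp
// Minimal sketch of "a later definition wins" store elision with the
// complementary getter/setter exception described above. Plain
// std::unordered_map instead of V8's Zone hash table.
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

enum class Kind { kData, kGetter, kSetter };

struct Property {
  std::string name;
  Kind kind;
  bool emit_store = true;
};

void CalculateEmitStore(std::vector<Property>& props) {
  std::unordered_map<std::string, Property*> table;
  // Walk back to front so the table always holds a later definition.
  for (auto it = props.rbegin(); it != props.rend(); ++it) {
    Property& p = *it;
    auto [entry, inserted] = table.try_emplace(p.name, &p);
    if (inserted) continue;  // No later definition seen: keep the store.
    Kind later_kind = entry->second->kind;
    bool complementary_accessors =
        (p.kind == Kind::kGetter && later_kind == Kind::kSetter) ||
        (p.kind == Kind::kSetter && later_kind == Kind::kGetter);
    if (!complementary_accessors) {
      p.emit_store = false;  // Shadowed by a later definition.
      if (later_kind == Kind::kGetter || later_kind == Kind::kSetter) {
        entry->second = &p;
      }
    }
  }
}

int main() {
  // Roughly models `{get foo() {}, foo: 42, bar: 1}`.
  std::vector<Property> props = {
      {"foo", Kind::kGetter}, {"foo", Kind::kData}, {"bar", Kind::kData}};
  CalculateEmitStore(props);
  for (const Property& p : props) {
    std::cout << p.name << " emit_store=" << p.emit_store << "\n";
  }
  // Only the later data property `foo: 42` and `bar` keep their stores;
  // the shadowed getter does not.
}
```
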
@ -427,7 +451,7 @@ int ObjectLiteral::InitDepthAndFlags() {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
bool is_compile_time_value = CompileTimeValue::IsCompileTimeValue(value);
bool is_compile_time_value = value->IsCompileTimeValue();
is_simple = is_simple && is_compile_time_value;
// Keep track of the number of elements in the object literal and
@ -454,8 +478,8 @@ int ObjectLiteral::InitDepthAndFlags() {
return depth_acc;
}
void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
if (!constant_properties_.is_null()) return;
void ObjectLiteral::BuildBoilerplateDescription(Isolate* isolate) {
if (!boilerplate_description_.is_null()) return;
int index_keys = 0;
bool has_seen_proto = false;
@ -476,17 +500,17 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
}
}
Handle<BoilerplateDescription> constant_properties =
isolate->factory()->NewBoilerplateDescription(boilerplate_properties_,
properties()->length(),
index_keys, has_seen_proto);
Handle<ObjectBoilerplateDescription> boilerplate_description =
isolate->factory()->NewObjectBoilerplateDescription(
boilerplate_properties_, properties()->length(), index_keys,
has_seen_proto);
int position = 0;
for (int i = 0; i < properties()->length(); i++) {
ObjectLiteral::Property* property = properties()->at(i);
if (property->IsPrototype()) continue;
if (static_cast<uint32_t>(position) == boilerplate_properties_ * 2) {
if (static_cast<uint32_t>(position) == boilerplate_properties_) {
DCHECK(property->is_computed_name());
break;
}
@ -510,11 +534,12 @@ void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
// Add name, value pair to the fixed array.
constant_properties->set(position++, *key);
constant_properties->set(position++, *value);
boilerplate_description->set_key_value(position++, *key, *value);
}
constant_properties_ = constant_properties;
boilerplate_description->set_flags(EncodeLiteralType());
boilerplate_description_ = boilerplate_description;
}
bool ObjectLiteral::IsFastCloningSupported() const {
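
A small aside on the bookkeeping in this hunk: position now counts key/value pairs rather than individual slots (the loop compares it against boilerplate_properties_ instead of boilerplate_properties_ * 2), and set_key_value writes both halves of a pair at once. The sketch below shows that pair-indexed addressing in isolation; it is purely illustrative and not the real ObjectBoilerplateDescription, which is a V8 heap object that also carries the flags slot set just above.

```cpp
// Purely illustrative pair-addressed store: set_key_value(i, k, v) writes the
// i-th key/value pair into a flat backing array.
#include <cassert>
#include <iostream>
#include <string>
#include <vector>

class PairDescription {
 public:
  explicit PairDescription(size_t pair_count) : slots_(2 * pair_count) {}

  void set_key_value(size_t index, std::string key, std::string value) {
    assert(2 * index + 1 < slots_.size());
    slots_[2 * index] = std::move(key);
    slots_[2 * index + 1] = std::move(value);
  }

  const std::string& key(size_t i) const { return slots_[2 * i]; }
  const std::string& value(size_t i) const { return slots_[2 * i + 1]; }

 private:
  std::vector<std::string> slots_;
};

int main() {
  PairDescription desc(2);
  size_t position = 0;  // counts pairs, mirroring the updated loop above
  desc.set_key_value(position++, "a", "1");
  desc.set_key_value(position++, "b", "2");
  for (size_t i = 0; i < 2; ++i) {
    std::cout << desc.key(i) << ": " << desc.value(i) << "\n";
  }
}
```
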
@ -528,8 +553,8 @@ bool ObjectLiteral::IsFastCloningSupported() const {
bool ArrayLiteral::is_empty() const {
DCHECK(is_initialized());
return values()->is_empty() &&
(constant_elements().is_null() || constant_elements()->is_empty());
return values()->is_empty() && (boilerplate_description().is_null() ||
boilerplate_description()->is_empty());
}
int ArrayLiteral::InitDepthAndFlags() {
@ -550,7 +575,7 @@ int ArrayLiteral::InitDepthAndFlags() {
if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
}
if (!CompileTimeValue::IsCompileTimeValue(element)) {
if (!element->IsCompileTimeValue()) {
is_simple = false;
}
}
@ -563,8 +588,8 @@ int ArrayLiteral::InitDepthAndFlags() {
return depth_acc;
}
void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
if (!constant_elements_.is_null()) return;
void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate) {
if (!boilerplate_description_.is_null()) return;
int constants_length =
first_spread_index_ >= 0 ? first_spread_index_ : values()->length();
@ -606,7 +631,7 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// elements array to a copy-on-write array.
if (is_simple() && depth() == 1 && array_index > 0 &&
IsSmiOrObjectElementsKind(kind)) {
fixed_array->set_map(isolate->heap()->fixed_cow_array_map());
fixed_array->set_map(ReadOnlyRoots(isolate).fixed_cow_array_map());
}
Handle<FixedArrayBase> elements = fixed_array;
@ -615,14 +640,12 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
elements = isolate->factory()->NewFixedDoubleArray(constants_length);
// We are copying from non-fast-double to fast-double.
ElementsKind from_kind = TERMINAL_FAST_ELEMENTS_KIND;
accessor->CopyElements(fixed_array, from_kind, elements, constants_length);
accessor->CopyElements(isolate, fixed_array, from_kind, elements,
constants_length);
}
// Remember both the literal's constant values as well as the ElementsKind.
Handle<ConstantElementsPair> literals =
isolate->factory()->NewConstantElementsPair(kind, elements);
constant_elements_ = literals;
boilerplate_description_ =
isolate->factory()->NewArrayBoilerplateDescription(kind, elements);
}
bool ArrayLiteral::IsFastCloningSupported() const {
@ -643,8 +666,17 @@ Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
if (expression->IsLiteral()) {
return expression->AsLiteral()->BuildValue(isolate);
}
if (CompileTimeValue::IsCompileTimeValue(expression)) {
return CompileTimeValue::GetValue(isolate, expression);
if (expression->IsCompileTimeValue()) {
if (expression->IsObjectLiteral()) {
ObjectLiteral* object_literal = expression->AsObjectLiteral();
DCHECK(object_literal->is_simple());
return object_literal->boilerplate_description();
} else {
DCHECK(expression->IsArrayLiteral());
ArrayLiteral* array_literal = expression->AsArrayLiteral();
DCHECK(array_literal->is_simple());
return array_literal->boilerplate_description();
}
}
return isolate->factory()->uninitialized_value();
}
@ -669,10 +701,12 @@ bool MaterializedLiteral::NeedsInitialAllocationSite() {
void MaterializedLiteral::BuildConstants(Isolate* isolate) {
if (IsArrayLiteral()) {
return AsArrayLiteral()->BuildConstantElements(isolate);
AsArrayLiteral()->BuildBoilerplateDescription(isolate);
return;
}
if (IsObjectLiteral()) {
return AsObjectLiteral()->BuildConstantProperties(isolate);
AsObjectLiteral()->BuildBoilerplateDescription(isolate);
return;
}
DCHECK(IsRegExpLiteral());
}
@ -698,7 +732,7 @@ Handle<TemplateObjectDescription> GetTemplateObject::GetOrBuildDescription(
if (this->cooked_strings()->at(i) != nullptr) {
cooked_strings->set(i, *this->cooked_strings()->at(i)->string());
} else {
cooked_strings->set(i, isolate->heap()->undefined_value());
cooked_strings->set(i, ReadOnlyRoots(isolate).undefined_value());
}
}
}
@ -806,9 +840,10 @@ Call::CallType Call::GetCallType() const {
if (proxy->var()->IsUnallocated()) {
return GLOBAL_CALL;
} else if (proxy->var()->IsLookupSlot()) {
// Calls going through 'with' always use DYNAMIC rather than DYNAMIC_LOCAL
// or DYNAMIC_GLOBAL.
return proxy->var()->mode() == DYNAMIC ? WITH_CALL : OTHER_CALL;
// Calls going through 'with' always use VariableMode::kDynamic rather
// than VariableMode::kDynamicLocal or VariableMode::kDynamicGlobal.
return proxy->var()->mode() == VariableMode::kDynamic ? WITH_CALL
: OTHER_CALL;
}
}
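
Much of the churn here and in the later scope changes is the move from bare enumerators (VAR, CONST, DYNAMIC, TEMPORARY) to qualified VariableMode::k* names. Assuming VariableMode is now a scoped enum (enum class), the unqualified spellings stop compiling, which is what keeps the rename mechanical. A small sketch of the difference:

```cpp
// Sketch only: an unscoped enum leaks its names into the enclosing scope,
// while a scoped enum requires qualification (assumed to be the reason for
// the VariableMode::k* spellings in this diff).
#include <iostream>

enum OldMode { VAR, CONST, DYNAMIC };                // unscoped
enum class VariableMode { kVar, kConst, kDynamic };  // scoped

int main() {
  OldMode a = VAR;                      // bare name is visible here
  VariableMode b = VariableMode::kVar;  // must be qualified
  // VariableMode c = kVar;             // would not compile
  std::cout << (a == VAR) << " " << (b == VariableMode::kVar) << "\n";
}
```
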
@ -831,7 +866,7 @@ Call::CallType Call::GetCallType() const {
return OTHER_CALL;
}
CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
CaseClause::CaseClause(Expression* label, ZonePtrList<Statement>* statements)
: label_(label), statements_(statements) {}
bool Literal::IsPropertyName() const {
@ -954,7 +989,7 @@ const char* CallRuntime::debug_name() {
case k##NodeType: \
return static_cast<const NodeType*>(this)->labels();
ZoneList<const AstRawString*>* BreakableStatement::labels() const {
ZonePtrList<const AstRawString>* BreakableStatement::labels() const {
switch (node_type()) {
BREAKABLE_NODE_LIST(RETURN_LABELS)
ITERATION_NODE_LIST(RETURN_LABELS)

251
deps/v8/src/ast/ast.h vendored

@ -241,6 +241,8 @@ class Expression : public AstNode {
// that this also checks for loads of the global "undefined" variable.
bool IsUndefinedLiteral() const;
bool IsCompileTimeValue();
protected:
Expression(int pos, NodeType type) : AstNode(pos, type) {}
@ -255,7 +257,7 @@ class BreakableStatement : public Statement {
TARGET_FOR_NAMED_ONLY
};
ZoneList<const AstRawString*>* labels() const;
ZonePtrList<const AstRawString>* labels() const;
// Testers.
bool is_target_for_anonymous() const {
@ -277,12 +279,12 @@ class BreakableStatement : public Statement {
class Block : public BreakableStatement {
public:
ZoneList<Statement*>* statements() { return &statements_; }
ZonePtrList<Statement>* statements() { return &statements_; }
bool ignore_completion_value() const {
return IgnoreCompletionField::decode(bit_field_);
}
inline ZoneList<const AstRawString*>* labels() const;
inline ZonePtrList<const AstRawString>* labels() const;
bool IsJump() const {
return !statements_.is_empty() && statements_.last()->IsJump() &&
@ -295,7 +297,7 @@ class Block : public BreakableStatement {
private:
friend class AstNodeFactory;
ZoneList<Statement*> statements_;
ZonePtrList<Statement> statements_;
Scope* scope_;
class IgnoreCompletionField
@ -304,7 +306,7 @@ class Block : public BreakableStatement {
: public BitField<bool, IgnoreCompletionField::kNext, 1> {};
protected:
Block(Zone* zone, ZoneList<const AstRawString*>* labels, int capacity,
Block(Zone* zone, ZonePtrList<const AstRawString>* labels, int capacity,
bool ignore_completion_value)
: BreakableStatement(TARGET_FOR_NAMED_ONLY, kNoSourcePosition, kBlock),
statements_(capacity, zone),
@ -319,18 +321,18 @@ class LabeledBlock final : public Block {
friend class AstNodeFactory;
friend class Block;
LabeledBlock(Zone* zone, ZoneList<const AstRawString*>* labels, int capacity,
bool ignore_completion_value)
LabeledBlock(Zone* zone, ZonePtrList<const AstRawString>* labels,
int capacity, bool ignore_completion_value)
: Block(zone, labels, capacity, ignore_completion_value),
labels_(labels) {
DCHECK_NOT_NULL(labels);
DCHECK_GT(labels->length(), 0);
}
ZoneList<const AstRawString*>* labels_;
ZonePtrList<const AstRawString>* labels_;
};
inline ZoneList<const AstRawString*>* Block::labels() const {
inline ZonePtrList<const AstRawString>* Block::labels() const {
if (IsLabeledField::decode(bit_field_)) {
return static_cast<const LabeledBlock*>(this)->labels_;
}
@ -437,10 +439,10 @@ class IterationStatement : public BreakableStatement {
Statement* body() const { return body_; }
void set_body(Statement* s) { body_ = s; }
ZoneList<const AstRawString*>* labels() const { return labels_; }
ZonePtrList<const AstRawString>* labels() const { return labels_; }
protected:
IterationStatement(ZoneList<const AstRawString*>* labels, int pos,
IterationStatement(ZonePtrList<const AstRawString>* labels, int pos,
NodeType type)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, type),
labels_(labels),
@ -451,7 +453,7 @@ class IterationStatement : public BreakableStatement {
BreakableStatement::kNextBitFieldIndex;
private:
ZoneList<const AstRawString*>* labels_;
ZonePtrList<const AstRawString>* labels_;
Statement* body_;
};
@ -468,7 +470,7 @@ class DoWhileStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
DoWhileStatement(ZoneList<const AstRawString*>* labels, int pos)
DoWhileStatement(ZonePtrList<const AstRawString>* labels, int pos)
: IterationStatement(labels, pos, kDoWhileStatement), cond_(nullptr) {}
Expression* cond_;
@ -487,7 +489,7 @@ class WhileStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
WhileStatement(ZoneList<const AstRawString*>* labels, int pos)
WhileStatement(ZonePtrList<const AstRawString>* labels, int pos)
: IterationStatement(labels, pos, kWhileStatement), cond_(nullptr) {}
Expression* cond_;
@ -511,7 +513,7 @@ class ForStatement final : public IterationStatement {
private:
friend class AstNodeFactory;
ForStatement(ZoneList<const AstRawString*>* labels, int pos)
ForStatement(ZonePtrList<const AstRawString>* labels, int pos)
: IterationStatement(labels, pos, kForStatement),
init_(nullptr),
cond_(nullptr),
@ -537,7 +539,7 @@ class ForEachStatement : public IterationStatement {
}
protected:
ForEachStatement(ZoneList<const AstRawString*>* labels, int pos,
ForEachStatement(ZonePtrList<const AstRawString>* labels, int pos,
NodeType type)
: IterationStatement(labels, pos, type) {}
};
@ -564,7 +566,7 @@ class ForInStatement final : public ForEachStatement {
private:
friend class AstNodeFactory;
ForInStatement(ZoneList<const AstRawString*>* labels, int pos)
ForInStatement(ZonePtrList<const AstRawString>* labels, int pos)
: ForEachStatement(labels, pos, kForInStatement),
each_(nullptr),
subject_(nullptr) {
@ -630,7 +632,7 @@ class ForOfStatement final : public ForEachStatement {
private:
friend class AstNodeFactory;
ForOfStatement(ZoneList<const AstRawString*>* labels, int pos)
ForOfStatement(ZonePtrList<const AstRawString>* labels, int pos)
: ForEachStatement(labels, pos, kForOfStatement),
iterator_(nullptr),
assign_iterator_(nullptr),
@ -757,40 +759,40 @@ class CaseClause final : public ZoneObject {
DCHECK(!is_default());
return label_;
}
ZoneList<Statement*>* statements() const { return statements_; }
ZonePtrList<Statement>* statements() const { return statements_; }
private:
friend class AstNodeFactory;
CaseClause(Expression* label, ZoneList<Statement*>* statements);
CaseClause(Expression* label, ZonePtrList<Statement>* statements);
Expression* label_;
ZoneList<Statement*>* statements_;
ZonePtrList<Statement>* statements_;
};
class SwitchStatement final : public BreakableStatement {
public:
ZoneList<const AstRawString*>* labels() const { return labels_; }
ZonePtrList<const AstRawString>* labels() const { return labels_; }
Expression* tag() const { return tag_; }
void set_tag(Expression* t) { tag_ = t; }
ZoneList<CaseClause*>* cases() { return &cases_; }
ZonePtrList<CaseClause>* cases() { return &cases_; }
private:
friend class AstNodeFactory;
SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels,
SwitchStatement(Zone* zone, ZonePtrList<const AstRawString>* labels,
Expression* tag, int pos)
: BreakableStatement(TARGET_FOR_ANONYMOUS, pos, kSwitchStatement),
labels_(labels),
tag_(tag),
cases_(4, zone) {}
ZoneList<const AstRawString*>* labels_;
ZonePtrList<const AstRawString>* labels_;
Expression* tag_;
ZoneList<CaseClause*> cases_;
ZonePtrList<CaseClause> cases_;
};
@ -1120,8 +1122,8 @@ class MaterializedLiteral : public Expression {
void BuildConstants(Isolate* isolate);
// If the expression is a literal, return the literal value;
// if the expression is a materialized literal and is simple return a
// compile time value as encoded by CompileTimeValue::GetValue().
// if the expression is a materialized literal and is_simple,
// then return an Array or Object Boilerplate Description.
// Otherwise, return undefined literal as the placeholder
// in the object literal boilerplate.
Handle<Object> GetBoilerplateValue(Expression* expression, Isolate* isolate);
@ -1275,12 +1277,12 @@ class ObjectLiteral final : public AggregateLiteral {
public:
typedef ObjectLiteralProperty Property;
Handle<BoilerplateDescription> constant_properties() const {
DCHECK(!constant_properties_.is_null());
return constant_properties_;
Handle<ObjectBoilerplateDescription> boilerplate_description() const {
DCHECK(!boilerplate_description_.is_null());
return boilerplate_description_;
}
int properties_count() const { return boilerplate_properties_; }
ZoneList<Property*>* properties() const { return properties_; }
ZonePtrList<Property>* properties() const { return properties_; }
bool has_elements() const { return HasElementsField::decode(bit_field_); }
bool has_rest_property() const {
return HasRestPropertyField::decode(bit_field_);
@ -1303,17 +1305,17 @@ class ObjectLiteral final : public AggregateLiteral {
// Populate the depth field and flags, returns the depth.
int InitDepthAndFlags();
// Get the constant properties fixed array, populating it if necessary.
Handle<BoilerplateDescription> GetOrBuildConstantProperties(
// Get the boilerplate description, populating it if necessary.
Handle<ObjectBoilerplateDescription> GetOrBuildBoilerplateDescription(
Isolate* isolate) {
if (constant_properties_.is_null()) {
BuildConstantProperties(isolate);
if (boilerplate_description_.is_null()) {
BuildBoilerplateDescription(isolate);
}
return constant_properties();
return boilerplate_description();
}
// Populate the constant properties fixed array.
void BuildConstantProperties(Isolate* isolate);
// Populate the boilerplate description.
void BuildBoilerplateDescription(Isolate* isolate);
// Mark all computed expressions that are bound to a key that
// is shadowed by a later occurrence of the same key. For the
@ -1355,7 +1357,7 @@ class ObjectLiteral final : public AggregateLiteral {
private:
friend class AstNodeFactory;
ObjectLiteral(ZoneList<Property*>* properties,
ObjectLiteral(ZonePtrList<Property>* properties,
uint32_t boilerplate_properties, int pos,
bool has_rest_property)
: AggregateLiteral(pos, kObjectLiteral),
@ -1380,7 +1382,7 @@ class ObjectLiteral final : public AggregateLiteral {
}
uint32_t boilerplate_properties_;
Handle<BoilerplateDescription> constant_properties_;
Handle<ObjectBoilerplateDescription> boilerplate_description_;
ZoneList<Property*>* properties_;
class HasElementsField
@ -1423,11 +1425,11 @@ class AccessorTable
// for minimizing the work when constructing it at runtime.
class ArrayLiteral final : public AggregateLiteral {
public:
Handle<ConstantElementsPair> constant_elements() const {
return constant_elements_;
Handle<ArrayBoilerplateDescription> boilerplate_description() const {
return boilerplate_description_;
}
ZoneList<Expression*>* values() const { return values_; }
ZonePtrList<Expression>* values() const { return values_; }
int first_spread_index() const { return first_spread_index_; }
@ -1436,16 +1438,17 @@ class ArrayLiteral final : public AggregateLiteral {
// Populate the depth field and flags, returns the depth.
int InitDepthAndFlags();
// Get the constant elements fixed array, populating it if necessary.
Handle<ConstantElementsPair> GetOrBuildConstantElements(Isolate* isolate) {
if (constant_elements_.is_null()) {
BuildConstantElements(isolate);
// Get the boilerplate description, populating it if necessary.
Handle<ArrayBoilerplateDescription> GetOrBuildBoilerplateDescription(
Isolate* isolate) {
if (boilerplate_description_.is_null()) {
BuildBoilerplateDescription(isolate);
}
return constant_elements();
return boilerplate_description();
}
// Populate the constant elements fixed array.
void BuildConstantElements(Isolate* isolate);
// Populate the boilerplate description.
void BuildBoilerplateDescription(Isolate* isolate);
// Determines whether the {CreateShallowArrayLiteral} builtin can be used.
bool IsFastCloningSupported() const;
@ -1458,15 +1461,14 @@ class ArrayLiteral final : public AggregateLiteral {
private:
friend class AstNodeFactory;
ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index, int pos)
ArrayLiteral(ZonePtrList<Expression>* values, int first_spread_index, int pos)
: AggregateLiteral(pos, kArrayLiteral),
first_spread_index_(first_spread_index),
values_(values) {
}
values_(values) {}
int first_spread_index_;
Handle<ConstantElementsPair> constant_elements_;
ZoneList<Expression*>* values_;
Handle<ArrayBoilerplateDescription> boilerplate_description_;
ZonePtrList<Expression>* values_;
};
enum class HoleCheckMode { kRequired, kElided };
@ -1633,7 +1635,7 @@ class ResolvedProperty final : public Expression {
class Call final : public Expression {
public:
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
ZonePtrList<Expression>* arguments() const { return arguments_; }
bool is_possibly_eval() const {
return IsPossiblyEvalField::decode(bit_field_);
@ -1672,17 +1674,15 @@ class Call final : public Expression {
private:
friend class AstNodeFactory;
Call(Expression* expression, ZoneList<Expression*>* arguments, int pos,
Call(Expression* expression, ZonePtrList<Expression>* arguments, int pos,
PossiblyEval possibly_eval)
: Expression(pos, kCall),
expression_(expression),
arguments_(arguments) {
: Expression(pos, kCall), expression_(expression), arguments_(arguments) {
bit_field_ |=
IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL) |
IsTaggedTemplateField::encode(false);
}
Call(Expression* expression, ZoneList<Expression*>* arguments, int pos,
Call(Expression* expression, ZonePtrList<Expression>* arguments, int pos,
TaggedTemplateTag tag)
: Expression(pos, kCall), expression_(expression), arguments_(arguments) {
bit_field_ |= IsPossiblyEvalField::encode(false) |
@ -1695,14 +1695,14 @@ class Call final : public Expression {
: public BitField<bool, IsPossiblyEvalField::kNext, 1> {};
Expression* expression_;
ZoneList<Expression*>* arguments_;
ZonePtrList<Expression>* arguments_;
};
class CallNew final : public Expression {
public:
Expression* expression() const { return expression_; }
ZoneList<Expression*>* arguments() const { return arguments_; }
ZonePtrList<Expression>* arguments() const { return arguments_; }
bool only_last_arg_is_spread() {
return !arguments_->is_empty() && arguments_->last()->IsSpread();
@ -1711,14 +1711,13 @@ class CallNew final : public Expression {
private:
friend class AstNodeFactory;
CallNew(Expression* expression, ZoneList<Expression*>* arguments, int pos)
CallNew(Expression* expression, ZonePtrList<Expression>* arguments, int pos)
: Expression(pos, kCallNew),
expression_(expression),
arguments_(arguments) {
}
arguments_(arguments) {}
Expression* expression_;
ZoneList<Expression*>* arguments_;
ZonePtrList<Expression>* arguments_;
};
// The CallRuntime class does not represent any official JavaScript
@ -1727,7 +1726,7 @@ class CallNew final : public Expression {
// implemented in JavaScript.
class CallRuntime final : public Expression {
public:
ZoneList<Expression*>* arguments() const { return arguments_; }
ZonePtrList<Expression>* arguments() const { return arguments_; }
bool is_jsruntime() const { return function_ == nullptr; }
int context_index() const {
@ -1745,11 +1744,11 @@ class CallRuntime final : public Expression {
friend class AstNodeFactory;
CallRuntime(const Runtime::Function* function,
ZoneList<Expression*>* arguments, int pos)
ZonePtrList<Expression>* arguments, int pos)
: Expression(pos, kCallRuntime),
function_(function),
arguments_(arguments) {}
CallRuntime(int context_index, ZoneList<Expression*>* arguments, int pos)
CallRuntime(int context_index, ZonePtrList<Expression>* arguments, int pos)
: Expression(pos, kCallRuntime),
context_index_(context_index),
function_(nullptr),
@ -1757,7 +1756,7 @@ class CallRuntime final : public Expression {
int context_index_;
const Runtime::Function* function_;
ZoneList<Expression*>* arguments_;
ZonePtrList<Expression>* arguments_;
};
@ -2190,7 +2189,7 @@ class FunctionLiteral final : public Expression {
const AstConsString* raw_name() const { return raw_name_; }
void set_raw_name(const AstConsString* name) { raw_name_ = name; }
DeclarationScope* scope() const { return scope_; }
ZoneList<Statement*>* body() const { return body_; }
ZonePtrList<Statement>* body() const { return body_; }
void set_function_token_position(int pos) { function_token_position_ = pos; }
int function_token_position() const { return function_token_position_; }
int start_position() const;
@ -2310,7 +2309,7 @@ class FunctionLiteral final : public Expression {
FunctionLiteral(
Zone* zone, const AstRawString* name, AstValueFactory* ast_value_factory,
DeclarationScope* scope, ZoneList<Statement*>* body,
DeclarationScope* scope, ZonePtrList<Statement>* body,
int expected_property_count, int parameter_count, int function_length,
FunctionType function_type, ParameterFlag has_duplicate_parameters,
EagerCompileHint eager_compile_hint, int position, bool has_braces,
@ -2359,7 +2358,7 @@ class FunctionLiteral final : public Expression {
const AstConsString* raw_name_;
DeclarationScope* scope_;
ZoneList<Statement*>* body_;
ZonePtrList<Statement>* body_;
const AstConsString* raw_inferred_name_;
Handle<String> inferred_name_;
ProducedPreParsedScopeData* produced_preparsed_scope_data_;
@ -2407,15 +2406,16 @@ class ClassLiteralProperty final : public LiteralProperty {
class InitializeClassFieldsStatement final : public Statement {
public:
typedef ClassLiteralProperty Property;
ZoneList<Property*>* fields() const { return fields_; }
ZonePtrList<Property>* fields() const { return fields_; }
private:
friend class AstNodeFactory;
InitializeClassFieldsStatement(ZoneList<Property*>* fields, int pos)
InitializeClassFieldsStatement(ZonePtrList<Property>* fields, int pos)
: Statement(pos, kInitializeClassFieldsStatement), fields_(fields) {}
ZoneList<Property*>* fields_;
ZonePtrList<Property>* fields_;
};
class ClassLiteral final : public Expression {
@ -2426,7 +2426,7 @@ class ClassLiteral final : public Expression {
Variable* class_variable() const { return class_variable_; }
Expression* extends() const { return extends_; }
FunctionLiteral* constructor() const { return constructor_; }
ZoneList<Property*>* properties() const { return properties_; }
ZonePtrList<Property>* properties() const { return properties_; }
int start_position() const { return position(); }
int end_position() const { return end_position_; }
bool has_name_static_property() const {
@ -2455,7 +2455,7 @@ class ClassLiteral final : public Expression {
friend class AstNodeFactory;
ClassLiteral(Scope* scope, Variable* class_variable, Expression* extends,
FunctionLiteral* constructor, ZoneList<Property*>* properties,
FunctionLiteral* constructor, ZonePtrList<Property>* properties,
FunctionLiteral* static_fields_initializer,
FunctionLiteral* instance_fields_initializer_function,
int start_position, int end_position,
@ -2481,7 +2481,7 @@ class ClassLiteral final : public Expression {
Variable* class_variable_;
Expression* extends_;
FunctionLiteral* constructor_;
ZoneList<Property*>* properties_;
ZonePtrList<Property>* properties_;
FunctionLiteral* static_fields_initializer_;
FunctionLiteral* instance_fields_initializer_function_;
class HasNameStaticProperty
@ -2636,10 +2636,10 @@ class GetIterator final : public Expression {
// (defined at https://tc39.github.io/ecma262/#sec-gettemplateobject).
class GetTemplateObject final : public Expression {
public:
const ZoneList<const AstRawString*>* cooked_strings() const {
const ZonePtrList<const AstRawString>* cooked_strings() const {
return cooked_strings_;
}
const ZoneList<const AstRawString*>* raw_strings() const {
const ZonePtrList<const AstRawString>* raw_strings() const {
return raw_strings_;
}
@ -2648,34 +2648,35 @@ class GetTemplateObject final : public Expression {
private:
friend class AstNodeFactory;
GetTemplateObject(const ZoneList<const AstRawString*>* cooked_strings,
const ZoneList<const AstRawString*>* raw_strings, int pos)
GetTemplateObject(const ZonePtrList<const AstRawString>* cooked_strings,
const ZonePtrList<const AstRawString>* raw_strings, int pos)
: Expression(pos, kGetTemplateObject),
cooked_strings_(cooked_strings),
raw_strings_(raw_strings) {}
const ZoneList<const AstRawString*>* cooked_strings_;
const ZoneList<const AstRawString*>* raw_strings_;
const ZonePtrList<const AstRawString>* cooked_strings_;
const ZonePtrList<const AstRawString>* raw_strings_;
};
class TemplateLiteral final : public Expression {
public:
using StringList = ZoneList<const AstRawString*>;
using ExpressionList = ZoneList<Expression*>;
const StringList* string_parts() const { return string_parts_; }
const ExpressionList* substitutions() const { return substitutions_; }
const ZonePtrList<const AstRawString>* string_parts() const {
return string_parts_;
}
const ZonePtrList<Expression>* substitutions() const {
return substitutions_;
}
private:
friend class AstNodeFactory;
TemplateLiteral(const StringList* parts, const ExpressionList* substitutions,
int pos)
TemplateLiteral(const ZonePtrList<const AstRawString>* parts,
const ZonePtrList<Expression>* substitutions, int pos)
: Expression(pos, kTemplateLiteral),
string_parts_(parts),
substitutions_(substitutions) {}
const StringList* string_parts_;
const ExpressionList* substitutions_;
const ZonePtrList<const AstRawString>* string_parts_;
const ZonePtrList<Expression>* substitutions_;
};
// ----------------------------------------------------------------------------
@ -2692,7 +2693,7 @@ class AstVisitor BASE_EMBEDDED {
for (Declaration* decl : *declarations) Visit(decl);
}
void VisitStatements(ZoneList<Statement*>* statements) {
void VisitStatements(ZonePtrList<Statement>* statements) {
for (int i = 0; i < statements->length(); i++) {
Statement* stmt = statements->at(i);
Visit(stmt);
@ -2700,7 +2701,7 @@ class AstVisitor BASE_EMBEDDED {
}
}
void VisitExpressions(ZoneList<Expression*>* expressions) {
void VisitExpressions(ZonePtrList<Expression>* expressions) {
for (int i = 0; i < expressions->length(); i++) {
// The variable statement visiting code may pass null expressions
// to this code. Maybe this should be handled by introducing an
@ -2794,7 +2795,7 @@ class AstNodeFactory final BASE_EMBEDDED {
}
Block* NewBlock(int capacity, bool ignore_completion_value,
ZoneList<const AstRawString*>* labels = nullptr) {
ZonePtrList<const AstRawString>* labels = nullptr) {
return labels != nullptr
? new (zone_) LabeledBlock(zone_, labels, capacity,
ignore_completion_value)
@ -2802,22 +2803,22 @@ class AstNodeFactory final BASE_EMBEDDED {
Block(zone_, labels, capacity, ignore_completion_value);
}
#define STATEMENT_WITH_LABELS(NodeType) \
NodeType* New##NodeType(ZoneList<const AstRawString*>* labels, int pos) { \
return new (zone_) NodeType(labels, pos); \
#define STATEMENT_WITH_LABELS(NodeType) \
NodeType* New##NodeType(ZonePtrList<const AstRawString>* labels, int pos) { \
return new (zone_) NodeType(labels, pos); \
}
STATEMENT_WITH_LABELS(DoWhileStatement)
STATEMENT_WITH_LABELS(WhileStatement)
STATEMENT_WITH_LABELS(ForStatement)
#undef STATEMENT_WITH_LABELS
SwitchStatement* NewSwitchStatement(ZoneList<const AstRawString*>* labels,
SwitchStatement* NewSwitchStatement(ZonePtrList<const AstRawString>* labels,
Expression* tag, int pos) {
return new (zone_) SwitchStatement(zone_, labels, tag, pos);
}
ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
ZoneList<const AstRawString*>* labels,
ZonePtrList<const AstRawString>* labels,
int pos) {
switch (visit_mode) {
case ForEachStatement::ENUMERATE: {
@ -2830,7 +2831,7 @@ class AstNodeFactory final BASE_EMBEDDED {
UNREACHABLE();
}
ForOfStatement* NewForOfStatement(ZoneList<const AstRawString*>* labels,
ForOfStatement* NewForOfStatement(ZonePtrList<const AstRawString>* labels,
int pos) {
return new (zone_) ForOfStatement(labels, pos);
}
@ -2921,7 +2922,7 @@ class AstNodeFactory final BASE_EMBEDDED {
}
CaseClause* NewCaseClause(Expression* label,
ZoneList<Statement*>* statements) {
ZonePtrList<Statement>* statements) {
return new (zone_) CaseClause(label, statements);
}
@ -2961,7 +2962,7 @@ class AstNodeFactory final BASE_EMBEDDED {
}
ObjectLiteral* NewObjectLiteral(
ZoneList<ObjectLiteral::Property*>* properties,
ZonePtrList<ObjectLiteral::Property>* properties,
uint32_t boilerplate_properties, int pos, bool has_rest_property) {
return new (zone_) ObjectLiteral(properties, boilerplate_properties, pos,
has_rest_property);
@ -2986,12 +2987,11 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) RegExpLiteral(pattern, flags, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int pos) {
ArrayLiteral* NewArrayLiteral(ZonePtrList<Expression>* values, int pos) {
return new (zone_) ArrayLiteral(values, -1, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
ArrayLiteral* NewArrayLiteral(ZonePtrList<Expression>* values,
int first_spread_index, int pos) {
return new (zone_) ArrayLiteral(values, first_spread_index, pos);
}
@ -3027,35 +3027,34 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) ResolvedProperty(obj, property, pos);
}
Call* NewCall(Expression* expression, ZoneList<Expression*>* arguments,
Call* NewCall(Expression* expression, ZonePtrList<Expression>* arguments,
int pos, Call::PossiblyEval possibly_eval = Call::NOT_EVAL) {
return new (zone_) Call(expression, arguments, pos, possibly_eval);
}
Call* NewTaggedTemplate(Expression* expression,
ZoneList<Expression*>* arguments, int pos) {
ZonePtrList<Expression>* arguments, int pos) {
return new (zone_)
Call(expression, arguments, pos, Call::TaggedTemplateTag::kTrue);
}
CallNew* NewCallNew(Expression* expression,
ZoneList<Expression*>* arguments,
int pos) {
ZonePtrList<Expression>* arguments, int pos) {
return new (zone_) CallNew(expression, arguments, pos);
}
CallRuntime* NewCallRuntime(Runtime::FunctionId id,
ZoneList<Expression*>* arguments, int pos) {
ZonePtrList<Expression>* arguments, int pos) {
return new (zone_) CallRuntime(Runtime::FunctionForId(id), arguments, pos);
}
CallRuntime* NewCallRuntime(const Runtime::Function* function,
ZoneList<Expression*>* arguments, int pos) {
ZonePtrList<Expression>* arguments, int pos) {
return new (zone_) CallRuntime(function, arguments, pos);
}
CallRuntime* NewCallRuntime(int context_index,
ZoneList<Expression*>* arguments, int pos) {
ZonePtrList<Expression>* arguments, int pos) {
return new (zone_) CallRuntime(context_index, arguments, pos);
}
@ -3158,7 +3157,7 @@ class AstNodeFactory final BASE_EMBEDDED {
FunctionLiteral* NewFunctionLiteral(
const AstRawString* name, DeclarationScope* scope,
ZoneList<Statement*>* body, int expected_property_count,
ZonePtrList<Statement>* body, int expected_property_count,
int parameter_count, int function_length,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
@ -3176,7 +3175,7 @@ class AstNodeFactory final BASE_EMBEDDED {
// result of an eval (top-level or otherwise), or the result of calling
// the Function constructor.
FunctionLiteral* NewScriptOrEvalFunctionLiteral(DeclarationScope* scope,
ZoneList<Statement*>* body,
ZonePtrList<Statement>* body,
int expected_property_count,
int parameter_count) {
return new (zone_) FunctionLiteral(
@ -3184,7 +3183,7 @@ class AstNodeFactory final BASE_EMBEDDED {
body, expected_property_count, parameter_count, parameter_count,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kShouldLazyCompile, 0, true,
FunctionLiteral::kShouldLazyCompile, 0, /* has_braces */ false,
FunctionLiteral::kIdTypeTopLevel);
}
@ -3198,7 +3197,7 @@ class AstNodeFactory final BASE_EMBEDDED {
ClassLiteral* NewClassLiteral(
Scope* scope, Variable* variable, Expression* extends,
FunctionLiteral* constructor,
ZoneList<ClassLiteral::Property*>* properties,
ZonePtrList<ClassLiteral::Property>* properties,
FunctionLiteral* static_fields_initializer,
FunctionLiteral* instance_fields_initializer_function, int start_position,
int end_position, bool has_name_static_property,
@ -3255,14 +3254,14 @@ class AstNodeFactory final BASE_EMBEDDED {
}
GetTemplateObject* NewGetTemplateObject(
const ZoneList<const AstRawString*>* cooked_strings,
const ZoneList<const AstRawString*>* raw_strings, int pos) {
const ZonePtrList<const AstRawString>* cooked_strings,
const ZonePtrList<const AstRawString>* raw_strings, int pos) {
return new (zone_) GetTemplateObject(cooked_strings, raw_strings, pos);
}
TemplateLiteral* NewTemplateLiteral(
const ZoneList<const AstRawString*>* string_parts,
const ZoneList<Expression*>* substitutions, int pos) {
const ZonePtrList<const AstRawString>* string_parts,
const ZonePtrList<Expression>* substitutions, int pos) {
return new (zone_) TemplateLiteral(string_parts, substitutions, pos);
}
@ -3271,7 +3270,7 @@ class AstNodeFactory final BASE_EMBEDDED {
}
InitializeClassFieldsStatement* NewInitializeClassFieldsStatement(
ZoneList<ClassLiteralProperty*>* args, int pos) {
ZonePtrList<ClassLiteral::Property>* args, int pos) {
return new (zone_) InitializeClassFieldsStatement(args, pos);
}


@ -1,53 +0,0 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ast/compile-time-value.h"
#include "src/ast/ast.h"
#include "src/handles-inl.h"
#include "src/heap/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
if (expression->IsLiteral()) return true;
MaterializedLiteral* literal = expression->AsMaterializedLiteral();
if (literal == nullptr) return false;
return literal->IsSimple();
}
Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
Expression* expression) {
Factory* factory = isolate->factory();
DCHECK(IsCompileTimeValue(expression));
Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
if (expression->IsObjectLiteral()) {
ObjectLiteral* object_literal = expression->AsObjectLiteral();
DCHECK(object_literal->is_simple());
int literalTypeFlag = object_literal->EncodeLiteralType();
DCHECK_NE(kArrayLiteralFlag, literalTypeFlag);
result->set(kLiteralTypeSlot, Smi::FromInt(literalTypeFlag));
result->set(kElementsSlot, *object_literal->constant_properties());
} else {
ArrayLiteral* array_literal = expression->AsArrayLiteral();
DCHECK(array_literal->is_simple());
result->set(kLiteralTypeSlot, Smi::FromInt(kArrayLiteralFlag));
result->set(kElementsSlot, *array_literal->constant_elements());
}
return result;
}
int CompileTimeValue::GetLiteralTypeFlags(Handle<FixedArray> value) {
return Smi::ToInt(value->get(kLiteralTypeSlot));
}
Handle<HeapObject> CompileTimeValue::GetElements(Handle<FixedArray> value) {
return Handle<HeapObject>(HeapObject::cast(value->get(kElementsSlot)));
}
} // namespace internal
} // namespace v8


@ -1,46 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_COMPILE_TIME_VALUE_H_
#define V8_AST_COMPILE_TIME_VALUE_H_
#include "src/allocation.h"
#include "src/globals.h"
namespace v8 {
namespace internal {
class Expression;
// Support for handling complex values (array and object literals) that
// can be fully handled at compile time.
class CompileTimeValue : public AllStatic {
public:
// This is a special marker used to encode array literals. The value has to be
// different from any value possibly returned by
// ObjectLiteral::EncodeLiteralType.
static const int kArrayLiteralFlag = -1;
static bool IsCompileTimeValue(Expression* expression);
// Get the value as a compile time value.
static Handle<FixedArray> GetValue(Isolate* isolate, Expression* expression);
// Get the encoded literal type. This can either be kArrayLiteralFlag or
// encoded properties of an ObjectLiteral returned by
// ObjectLiteral::EncodeLiteralType.
static int GetLiteralTypeFlags(Handle<FixedArray> value);
// Get the elements of a compile time value returned by GetValue().
static Handle<HeapObject> GetElements(Handle<FixedArray> value);
private:
static const int kLiteralTypeSlot = 0;
static const int kElementsSlot = 1;
};
} // namespace internal
} // namespace v8
#endif // V8_AST_COMPILE_TIME_VALUE_H_


@ -6,12 +6,27 @@
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
#include "src/objects-inl.h"
#include "src/objects/module.h"
#include "src/objects/module-inl.h"
#include "src/pending-compilation-error-handler.h"
namespace v8 {
namespace internal {
bool ModuleDescriptor::AstRawStringComparer::operator()(
const AstRawString* lhs, const AstRawString* rhs) const {
// Fast path for equal pointers: a pointer is not strictly less than itself.
if (lhs == rhs) return false;
// Order by contents (ordering by hash is unstable across runs).
if (lhs->is_one_byte() != rhs->is_one_byte()) {
return lhs->is_one_byte();
}
if (lhs->byte_length() != rhs->byte_length()) {
return lhs->byte_length() < rhs->byte_length();
}
return memcmp(lhs->raw_data(), rhs->raw_data(), lhs->byte_length()) < 0;
}
void ModuleDescriptor::AddImport(const AstRawString* import_name,
const AstRawString* local_name,
const AstRawString* module_request,
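
The comparer added above orders AstRawStrings purely by their contents (width class, then length, then bytes), so any map keyed with it iterates in the same order on every run, independent of allocation addresses or the hash seed; the ModuleRequestMap, RegularExportMap and RegularImportMap typedefs in the header below plug it into the module maps. A stand-alone sketch of the same idea, with std::map and std::string_view standing in for the Zone containers and AstRawString (names here are illustrative):

```cpp
// Content-based comparator: order by width class, then length, then bytes.
// Keeps map iteration order stable across runs, unlike pointer- or
// hash-based ordering. Illustrative sketch, not the V8 classes themselves.
#include <cstring>
#include <iostream>
#include <map>
#include <string_view>

struct RawString {
  std::string_view bytes;
  bool is_one_byte;  // stands in for AstRawString::is_one_byte()
};

struct ContentComparer {
  bool operator()(const RawString& lhs, const RawString& rhs) const {
    if (&lhs == &rhs) return false;  // an element is not less than itself
    if (lhs.is_one_byte != rhs.is_one_byte) return lhs.is_one_byte;
    if (lhs.bytes.size() != rhs.bytes.size())
      return lhs.bytes.size() < rhs.bytes.size();
    return std::memcmp(lhs.bytes.data(), rhs.bytes.data(),
                       lhs.bytes.size()) < 0;
  }
};

int main() {
  std::map<RawString, int, ContentComparer> requests;
  requests[{"./b.js", true}] = 1;
  requests[{"./a.js", true}] = 0;
  // Iteration order depends only on the keys' contents: ./a.js before ./b.js.
  for (const auto& [key, index] : requests) {
    std::cout << key.bytes << " -> " << index << "\n";
  }
}
```
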


@ -124,10 +124,21 @@ class ModuleDescriptor : public ZoneObject {
ModuleRequest(int index, int position) : index(index), position(position) {}
};
// Custom content-based comparer for the below maps, to keep them stable
// across parses.
struct AstRawStringComparer {
bool operator()(const AstRawString* lhs, const AstRawString* rhs) const;
};
typedef ZoneMap<const AstRawString*, ModuleRequest, AstRawStringComparer>
ModuleRequestMap;
typedef ZoneMultimap<const AstRawString*, Entry*, AstRawStringComparer>
RegularExportMap;
typedef ZoneMap<const AstRawString*, Entry*, AstRawStringComparer>
RegularImportMap;
// Module requests.
const ZoneMap<const AstRawString*, ModuleRequest>& module_requests() const {
return module_requests_;
}
const ModuleRequestMap& module_requests() const { return module_requests_; }
// Namespace imports.
const ZoneVector<const Entry*>& namespace_imports() const {
@ -135,9 +146,7 @@ class ModuleDescriptor : public ZoneObject {
}
// All the remaining imports, indexed by local name.
const ZoneMap<const AstRawString*, Entry*>& regular_imports() const {
return regular_imports_;
}
const RegularImportMap& regular_imports() const { return regular_imports_; }
// Star exports and explicitly indirect exports.
const ZoneVector<const Entry*>& special_exports() const {
@ -146,9 +155,7 @@ class ModuleDescriptor : public ZoneObject {
// All the remaining exports, indexed by local name.
// After canonicalization (see Validate), these are exactly the local exports.
const ZoneMultimap<const AstRawString*, Entry*>& regular_exports() const {
return regular_exports_;
}
const RegularExportMap& regular_exports() const { return regular_exports_; }
void AddRegularExport(Entry* entry) {
DCHECK_NOT_NULL(entry->export_name);
@ -188,11 +195,11 @@ class ModuleDescriptor : public ZoneObject {
Handle<ModuleInfo> module_info);
private:
ZoneMap<const AstRawString*, ModuleRequest> module_requests_;
ModuleRequestMap module_requests_;
ZoneVector<const Entry*> special_exports_;
ZoneVector<const Entry*> namespace_imports_;
ZoneMultimap<const AstRawString*, Entry*> regular_exports_;
ZoneMap<const AstRawString*, Entry*> regular_imports_;
RegularExportMap regular_exports_;
RegularImportMap regular_imports_;
// If there are multiple export entries with the same export name, return the
// last of them (in source order). Otherwise return nullptr.


@ -498,16 +498,14 @@ void CallPrinter::VisitRewritableExpression(RewritableExpression* node) {
Find(node->expression());
}
void CallPrinter::FindStatements(ZoneList<Statement*>* statements) {
void CallPrinter::FindStatements(ZonePtrList<Statement>* statements) {
if (statements == nullptr) return;
for (int i = 0; i < statements->length(); i++) {
Find(statements->at(i));
}
}
void CallPrinter::FindArguments(ZoneList<Expression*>* arguments) {
void CallPrinter::FindArguments(ZonePtrList<Expression>* arguments) {
if (found_) return;
for (int i = 0; i < arguments->length(); i++) {
Find(arguments->at(i));
@ -589,7 +587,7 @@ void AstPrinter::Print(const char* format, ...) {
}
}
void AstPrinter::PrintLabels(ZoneList<const AstRawString*>* labels) {
void AstPrinter::PrintLabels(ZonePtrList<const AstRawString>* labels) {
if (labels != nullptr) {
for (int i = 0; i < labels->length(); i++) {
PrintLiteral(labels->at(i), false);
@ -748,8 +746,7 @@ void AstPrinter::PrintLiteralWithModeIndented(const char* info, Variable* var,
}
}
void AstPrinter::PrintLabelsIndented(ZoneList<const AstRawString*>* labels) {
void AstPrinter::PrintLabelsIndented(ZonePtrList<const AstRawString>* labels) {
if (labels == nullptr || labels->length() == 0) return;
PrintIndented("LABELS ");
PrintLabels(labels);
@ -809,15 +806,13 @@ void AstPrinter::PrintParameters(DeclarationScope* scope) {
}
}
void AstPrinter::PrintStatements(ZoneList<Statement*>* statements) {
void AstPrinter::PrintStatements(ZonePtrList<Statement>* statements) {
for (int i = 0; i < statements->length(); i++) {
Visit(statements->at(i));
}
}
void AstPrinter::PrintArguments(ZoneList<Expression*>* arguments) {
void AstPrinter::PrintArguments(ZonePtrList<Expression>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
Visit(arguments->at(i));
}
@ -1040,7 +1035,7 @@ void AstPrinter::VisitInitializeClassFieldsStatement(
}
void AstPrinter::PrintClassProperties(
ZoneList<ClassLiteral::Property*>* properties) {
ZonePtrList<ClassLiteral::Property>* properties) {
for (int i = 0; i < properties->length(); i++) {
ClassLiteral::Property* property = properties->at(i);
const char* prop_kind = nullptr;
@ -1119,7 +1114,7 @@ void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
}
void AstPrinter::PrintObjectProperties(
ZoneList<ObjectLiteral::Property*>* properties) {
ZonePtrList<ObjectLiteral::Property>* properties) {
for (int i = 0; i < properties->length(); i++) {
ObjectLiteral::Property* property = properties->at(i);
const char* prop_kind = nullptr;


@ -56,8 +56,8 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
protected:
void PrintLiteral(Handle<Object> value, bool quote);
void PrintLiteral(const AstRawString* value, bool quote);
void FindStatements(ZoneList<Statement*>* statements);
void FindArguments(ZoneList<Expression*>* arguments);
void FindStatements(ZonePtrList<Statement>* statements);
void FindArguments(ZonePtrList<Expression>* arguments);
};
@ -88,17 +88,17 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
void Init();
void PrintLabels(ZoneList<const AstRawString*>* labels);
void PrintLabels(ZonePtrList<const AstRawString>* labels);
void PrintLiteral(const AstRawString* value, bool quote);
void PrintLiteral(const AstConsString* value, bool quote);
void PrintLiteral(Literal* literal, bool quote);
void PrintIndented(const char* txt);
void PrintIndentedVisit(const char* s, AstNode* node);
void PrintStatements(ZoneList<Statement*>* statements);
void PrintStatements(ZonePtrList<Statement>* statements);
void PrintDeclarations(Declaration::List* declarations);
void PrintParameters(DeclarationScope* scope);
void PrintArguments(ZoneList<Expression*>* arguments);
void PrintArguments(ZonePtrList<Expression>* arguments);
void PrintCaseClause(CaseClause* clause);
void PrintLiteralIndented(const char* info, Literal* literal, bool quote);
void PrintLiteralIndented(const char* info, const AstRawString* value,
@ -107,9 +107,9 @@ class AstPrinter final : public AstVisitor<AstPrinter> {
bool quote);
void PrintLiteralWithModeIndented(const char* info, Variable* var,
const AstRawString* value);
void PrintLabelsIndented(ZoneList<const AstRawString*>* labels);
void PrintObjectProperties(ZoneList<ObjectLiteral::Property*>* properties);
void PrintClassProperties(ZoneList<ClassLiteral::Property*>* properties);
void PrintLabelsIndented(ZonePtrList<const AstRawString>* labels);
void PrintObjectProperties(ZonePtrList<ObjectLiteral::Property>* properties);
void PrintClassProperties(ZonePtrList<ClassLiteral::Property>* properties);
void inc_indent() { indent_++; }
void dec_indent() { indent_--; }


@ -13,10 +13,11 @@
#include "src/counters.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/module.h"
#include "src/objects/module-inl.h"
#include "src/objects/scope-info.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/preparsed-scope-data.h"
#include "src/zone/zone-list-inl.h"
namespace v8 {
namespace internal {
@ -75,8 +76,8 @@ Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name,
if (p->value == nullptr) {
// The variable has not been declared yet -> insert it.
DCHECK_EQ(name, p->key);
p->value =
mode == VAR ? kDummyPreParserVariable : kDummyPreParserLexicalVariable;
p->value = mode == VariableMode::kVar ? kDummyPreParserVariable
: kDummyPreParserLexicalVariable;
}
return reinterpret_cast<Variable*>(p->value);
}
@ -189,6 +190,13 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope,
SetDefaults();
}
bool DeclarationScope::IsDeclaredParameter(const AstRawString* name) {
// If IsSimpleParameterList is false, duplicate parameters are not allowed,
// however `arguments` may be allowed if the function is not strict code. Thus,
// the assumptions explained above do not hold.
return params_.Contains(variables_.Lookup(name));
}
ModuleScope::ModuleScope(DeclarationScope* script_scope,
AstValueFactory* ast_value_factory)
: DeclarationScope(ast_value_factory->zone(), script_scope, MODULE_SCOPE,
@ -199,11 +207,10 @@ ModuleScope::ModuleScope(DeclarationScope* script_scope,
DeclareThis(ast_value_factory);
}
ModuleScope::ModuleScope(Handle<ScopeInfo> scope_info,
ModuleScope::ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
AstValueFactory* avfactory)
: DeclarationScope(avfactory->zone(), MODULE_SCOPE, scope_info) {
Zone* zone = avfactory->zone();
Isolate* isolate = scope_info->GetIsolate();
Handle<ModuleInfo> module_info(scope_info->ModuleDescriptorInfo(), isolate);
set_language_mode(LanguageMode::kStrict);
@ -289,8 +296,9 @@ Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
// Cache the catch variable, even though it's also available via the
// scope_info, as the parser expects that a catch scope always has the catch
// variable as first and only variable.
Variable* variable = Declare(zone, catch_variable_name, VAR, NORMAL_VARIABLE,
kCreatedInitialized, maybe_assigned);
Variable* variable =
Declare(zone, catch_variable_name, VariableMode::kVar, NORMAL_VARIABLE,
kCreatedInitialized, maybe_assigned);
AllocateHeapSlot(variable);
}
@ -389,7 +397,8 @@ bool Scope::ContainsAsmModule() const {
return false;
}
Scope* Scope::DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
ScopeInfo* scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode) {
@ -400,7 +409,8 @@ Scope* Scope::DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
while (scope_info) {
if (scope_info->scope_type() == WITH_SCOPE) {
// For scope analysis, debug-evaluate is equivalent to a with scope.
outer_scope = new (zone) Scope(zone, WITH_SCOPE, handle(scope_info));
outer_scope =
new (zone) Scope(zone, WITH_SCOPE, handle(scope_info, isolate));
// TODO(yangguo): Remove once debug-evaluate properly keeps track of the
// function scope in which we are evaluating.
@ -412,40 +422,40 @@ Scope* Scope::DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
// scope info of this script context onto the existing script scope to
// avoid nesting script scopes.
if (deserialization_mode == DeserializationMode::kIncludingVariables) {
script_scope->SetScriptScopeInfo(handle(scope_info));
script_scope->SetScriptScopeInfo(handle(scope_info, isolate));
}
DCHECK(!scope_info->HasOuterScopeInfo());
break;
} else if (scope_info->scope_type() == FUNCTION_SCOPE) {
outer_scope =
new (zone) DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info));
outer_scope = new (zone)
DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info, isolate));
if (scope_info->IsAsmModule())
outer_scope->AsDeclarationScope()->set_asm_module();
} else if (scope_info->scope_type() == EVAL_SCOPE) {
outer_scope =
new (zone) DeclarationScope(zone, EVAL_SCOPE, handle(scope_info));
outer_scope = new (zone)
DeclarationScope(zone, EVAL_SCOPE, handle(scope_info, isolate));
} else if (scope_info->scope_type() == BLOCK_SCOPE) {
if (scope_info->is_declaration_scope()) {
outer_scope =
new (zone) DeclarationScope(zone, BLOCK_SCOPE, handle(scope_info));
outer_scope = new (zone)
DeclarationScope(zone, BLOCK_SCOPE, handle(scope_info, isolate));
} else {
outer_scope = new (zone) Scope(zone, BLOCK_SCOPE, handle(scope_info));
outer_scope =
new (zone) Scope(zone, BLOCK_SCOPE, handle(scope_info, isolate));
}
} else if (scope_info->scope_type() == MODULE_SCOPE) {
outer_scope =
new (zone) ModuleScope(handle(scope_info), ast_value_factory);
outer_scope = new (zone)
ModuleScope(isolate, handle(scope_info, isolate), ast_value_factory);
} else {
DCHECK_EQ(scope_info->scope_type(), CATCH_SCOPE);
DCHECK_EQ(scope_info->LocalCount(), 1);
DCHECK_EQ(scope_info->ContextLocalCount(), 1);
DCHECK_EQ(scope_info->ContextLocalMode(0), VAR);
DCHECK_EQ(scope_info->ContextLocalMode(0), VariableMode::kVar);
DCHECK_EQ(scope_info->ContextLocalInitFlag(0), kCreatedInitialized);
String* name = scope_info->ContextLocalName(0);
MaybeAssignedFlag maybe_assigned =
scope_info->ContextLocalMaybeAssignedFlag(0);
outer_scope =
new (zone) Scope(zone, ast_value_factory->GetString(handle(name)),
maybe_assigned, handle(scope_info));
outer_scope = new (zone)
Scope(zone, ast_value_factory->GetString(handle(name, isolate)),
maybe_assigned, handle(scope_info, isolate));
}
if (deserialization_mode == DeserializationMode::kScopesOnly) {
outer_scope->scope_info_ = Handle<ScopeInfo>::null();
@ -605,12 +615,13 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// Based on the preceding checks, it doesn't matter what we pass as
// sloppy_mode_block_scope_function_redefinition.
bool ok = true;
DeclareVariable(declaration, VAR,
Variable::DefaultInitializationFlag(VAR), nullptr, &ok);
DeclareVariable(declaration, VariableMode::kVar,
Variable::DefaultInitializationFlag(VariableMode::kVar),
nullptr, &ok);
DCHECK(ok);
} else {
DCHECK(is_being_lazily_parsed_);
Variable* var = DeclareVariableName(name, VAR);
Variable* var = DeclareVariableName(name, VariableMode::kVar);
if (var != kDummyPreParserVariable &&
var != kDummyPreParserLexicalVariable) {
DCHECK(FLAG_preparser_scope_analysis);
@ -633,7 +644,7 @@ void DeclarationScope::AttachOuterScopeInfo(ParseInfo* info, Isolate* isolate) {
DeclarationScope(info->zone(), info->ast_value_factory());
info->set_script_scope(script_scope);
ReplaceOuterScope(Scope::DeserializeScopeChain(
info->zone(), *outer_scope_info, script_scope,
isolate, info->zone(), *outer_scope_info, script_scope,
info->ast_value_factory(),
Scope::DeserializationMode::kIncludingVariables));
} else {
@ -703,7 +714,8 @@ void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
bool derived_constructor = IsDerivedConstructor(function_kind_);
Variable* var =
Declare(zone(), ast_value_factory->this_string(),
derived_constructor ? CONST : VAR, THIS_VARIABLE,
derived_constructor ? VariableMode::kConst : VariableMode::kVar,
THIS_VARIABLE,
derived_constructor ? kNeedsInitialization : kCreatedInitialized);
receiver_ = var;
}
@ -717,7 +729,8 @@ void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
// Declare 'arguments' variable which exists in all non arrow functions.
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
arguments_ = Declare(zone(), ast_value_factory->arguments_string(), VAR);
arguments_ = Declare(zone(), ast_value_factory->arguments_string(),
VariableMode::kVar);
} else if (IsLexical(arguments_)) {
// Check if there's lexically declared variable named arguments to avoid
// redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
@ -731,12 +744,14 @@ void DeclarationScope::DeclareDefaultFunctionVariables(
DCHECK(!is_arrow_scope());
DeclareThis(ast_value_factory);
new_target_ = Declare(zone(), ast_value_factory->new_target_string(), CONST);
new_target_ = Declare(zone(), ast_value_factory->new_target_string(),
VariableMode::kConst);
if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
IsAccessorFunction(function_kind_)) {
EnsureRareData()->this_function =
Declare(zone(), ast_value_factory->this_function_string(), CONST);
Declare(zone(), ast_value_factory->this_function_string(),
VariableMode::kConst);
}
}
@ -746,10 +761,10 @@ Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name) {
DCHECK_NULL(variables_.Lookup(name));
VariableKind kind = is_sloppy(language_mode()) ? SLOPPY_FUNCTION_NAME_VARIABLE
: NORMAL_VARIABLE;
function_ =
new (zone()) Variable(this, name, CONST, kind, kCreatedInitialized);
function_ = new (zone())
Variable(this, name, VariableMode::kConst, kind, kCreatedInitialized);
if (calls_sloppy_eval()) {
NonLocal(name, DYNAMIC);
NonLocal(name, VariableMode::kDynamic);
} else {
variables_.Add(zone(), function_);
}
@ -913,11 +928,12 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
new_parent->locals_.MoveTail(outer_closure->locals(), top_local_);
for (Variable* local : new_parent->locals_) {
DCHECK(local->mode() == TEMPORARY || local->mode() == VAR);
DCHECK(local->mode() == VariableMode::kTemporary ||
local->mode() == VariableMode::kVar);
DCHECK_EQ(local->scope(), local->scope()->GetClosureScope());
DCHECK_NE(local->scope(), new_parent);
local->set_scope(new_parent);
if (local->mode() == VAR) {
if (local->mode() == VariableMode::kVar) {
outer_closure->variables_.Remove(local);
new_parent->variables_.Add(new_parent->zone(), local);
}
@ -949,10 +965,6 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
// The Scope is backed up by ScopeInfo. This means it cannot operate in a
// heap-independent mode, and all strings must be internalized immediately. So
// it's ok to get the Handle<String> here.
// If we have a serialized scope info, we might find the variable there.
// There should be no local slot with the given name.
DCHECK_LT(scope_info_->StackSlotIndex(*name_handle), 0);
bool found = false;
VariableLocation location;
@ -979,7 +991,7 @@ Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
index = scope_info_->FunctionContextSlotIndex(*name_handle);
if (index < 0) return nullptr; // Nowhere found.
Variable* var = AsDeclarationScope()->DeclareFunctionVar(name);
DCHECK_EQ(CONST, var->mode());
DCHECK_EQ(VariableMode::kConst, var->mode());
var->AllocateTo(VariableLocation::CONTEXT, index);
return variables_.Lookup(name);
}
@ -1016,10 +1028,10 @@ Variable* DeclarationScope::DeclareParameter(
DCHECK(!is_being_lazily_parsed_);
DCHECK(!was_lazily_parsed_);
Variable* var;
if (mode == TEMPORARY) {
if (mode == VariableMode::kTemporary) {
var = NewTemporary(name);
} else {
DCHECK_EQ(mode, VAR);
DCHECK_EQ(mode, VariableMode::kVar);
var = Declare(zone(), name, mode);
// TODO(wingo): Avoid O(n^2) check.
if (is_duplicate != nullptr) {
@ -1049,17 +1061,17 @@ Variable* DeclarationScope::DeclareParameterName(
if (FLAG_preparser_scope_analysis) {
Variable* var;
if (declare_as_local) {
var = Declare(zone(), name, VAR);
var = Declare(zone(), name, VariableMode::kVar);
} else {
var = new (zone())
Variable(this, name, TEMPORARY, NORMAL_VARIABLE, kCreatedInitialized);
var = new (zone()) Variable(this, name, VariableMode::kTemporary,
NORMAL_VARIABLE, kCreatedInitialized);
}
if (add_parameter) {
params_.Add(var, zone());
}
return var;
}
DeclareVariableName(name, VAR);
DeclareVariableName(name, VariableMode::kVar);
return nullptr;
}
@ -1067,12 +1079,14 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag, VariableKind kind,
MaybeAssignedFlag maybe_assigned_flag) {
DCHECK(!already_resolved_);
// This function handles VAR, LET, and CONST modes. DYNAMIC variables are
// introduced during variable allocation, and TEMPORARY variables are
// allocated via NewTemporary().
// This function handles VariableMode::kVar, VariableMode::kLet, and
// VariableMode::kConst modes. VariableMode::kDynamic variables are
// introduced during variable allocation, and VariableMode::kTemporary
// variables are allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode));
DCHECK_IMPLIES(GetDeclarationScope()->is_being_lazily_parsed(),
mode == VAR || mode == LET || mode == CONST);
mode == VariableMode::kVar || mode == VariableMode::kLet ||
mode == VariableMode::kConst);
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
return Declare(zone(), name, mode, kind, init_flag, maybe_assigned_flag);
}
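For reference, the unscoped constants mentioned above (VAR, LET, CONST, TEMPORARY, DYNAMIC, ...) are being replaced throughout this change by a scoped enum. A minimal sketch of its assumed shape, for orientation only (the member order and underlying type are assumptions; the real definition lives elsewhere in the V8 sources):
// Illustrative sketch only -- not part of this diff.
enum class VariableMode : uint8_t {
  kLet,            // formerly LET
  kConst,          // formerly CONST
  kVar,            // formerly VAR
  kTemporary,      // formerly TEMPORARY
  kDynamic,        // formerly DYNAMIC
  kDynamicGlobal,  // formerly DYNAMIC_GLOBAL
  kDynamicLocal,   // formerly DYNAMIC_LOCAL
};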
@ -1085,7 +1099,7 @@ Variable* Scope::DeclareVariable(
DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
if (mode == VAR && !is_declaration_scope()) {
if (mode == VariableMode::kVar && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariable(
declaration, mode, init, sloppy_mode_block_scope_function_redefinition,
ok);
@ -1108,11 +1122,12 @@ Variable* Scope::DeclareVariable(
// assigned because they might be accessed by a lazily parsed top-level
// function, which, for efficiency, we preparse without variable tracking.
if (is_script_scope() || is_module_scope()) {
if (mode != CONST) proxy->set_is_assigned();
if (mode != VariableMode::kConst) proxy->set_is_assigned();
}
Variable* var = nullptr;
if (is_eval_scope() && is_sloppy(language_mode()) && mode == VAR) {
if (is_eval_scope() && is_sloppy(language_mode()) &&
mode == VariableMode::kVar) {
// In a var binding in a sloppy direct eval, pollute the enclosing scope
// with this new binding by doing the following:
// The proxy is bound to a lookup variable to force a dynamic declaration
@ -1173,7 +1188,7 @@ Variable* Scope::DeclareVariable(
*ok = false;
return nullptr;
}
} else if (mode == VAR) {
} else if (mode == VariableMode::kVar) {
var->set_maybe_assigned();
}
}
@ -1199,7 +1214,7 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
DCHECK(!already_resolved_);
DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
if (mode == VAR && !is_declaration_scope()) {
if (mode == VariableMode::kVar && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariableName(name, mode);
}
DCHECK(!is_with_scope());
@ -1220,7 +1235,7 @@ Variable* Scope::DeclareVariableName(const AstRawString* name,
// a function declaration, it's an error. This is an error PreParser
// hasn't previously detected. TODO(marja): Investigate whether we can now
// start returning this error.
} else if (mode == VAR) {
} else if (mode == VariableMode::kVar) {
var->set_maybe_assigned();
}
var->set_is_used();
@ -1237,9 +1252,9 @@ void Scope::DeclareCatchVariableName(const AstRawString* name) {
DCHECK(scope_info_.is_null());
if (FLAG_preparser_scope_analysis) {
Declare(zone(), name, VAR);
Declare(zone(), name, VariableMode::kVar);
} else {
variables_.DeclareName(zone(), name, VAR);
variables_.DeclareName(zone(), name, VariableMode::kVar);
}
}
@ -1253,11 +1268,11 @@ void Scope::AddUnresolved(VariableProxy* proxy) {
Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
VariableKind kind) {
DCHECK(is_script_scope());
return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind);
return variables_.Declare(zone(), this, name, VariableMode::kDynamicGlobal,
kind);
// TODO(neis): Mark variable as maybe-assigned?
}
bool Scope::RemoveUnresolved(VariableProxy* var) {
if (unresolved_ == var) {
unresolved_ = var->next_unresolved();
@ -1284,8 +1299,8 @@ Variable* Scope::NewTemporary(const AstRawString* name) {
Variable* Scope::NewTemporary(const AstRawString* name,
MaybeAssignedFlag maybe_assigned) {
DeclarationScope* scope = GetClosureScope();
Variable* var = new (zone())
Variable(scope, name, TEMPORARY, NORMAL_VARIABLE, kCreatedInitialized);
Variable* var = new (zone()) Variable(scope, name, VariableMode::kTemporary,
NORMAL_VARIABLE, kCreatedInitialized);
scope->AddLocal(var);
if (maybe_assigned == kMaybeAssigned) var->set_maybe_assigned();
return var;
@ -1302,7 +1317,7 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
Scope* current = this;
if (decl->IsVariableDeclaration() &&
decl->AsVariableDeclaration()->AsNested() != nullptr) {
DCHECK_EQ(mode, VAR);
DCHECK_EQ(mode, VariableMode::kVar);
current = decl->AsVariableDeclaration()->AsNested()->scope();
} else if (IsLexicalVariableMode(mode)) {
if (!is_block_scope()) continue;
@ -1327,7 +1342,7 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
}
Declaration* Scope::CheckLexDeclarationsConflictingWith(
const ZoneList<const AstRawString*>& names) {
const ZonePtrList<const AstRawString>& names) {
DCHECK(is_block_scope());
for (int i = 0; i < names.length(); ++i) {
Variable* var = LookupLocal(names.at(i));
@ -1467,11 +1482,11 @@ Scope* Scope::GetOuterScopeWithContext() {
}
Handle<StringSet> DeclarationScope::CollectNonLocals(
ParseInfo* info, Handle<StringSet> non_locals) {
Isolate* isolate, ParseInfo* info, Handle<StringSet> non_locals) {
VariableProxy* free_variables = FetchFreeVariables(this, info);
for (VariableProxy* proxy = free_variables; proxy != nullptr;
proxy = proxy->next_unresolved()) {
non_locals = StringSet::Add(non_locals, proxy->name());
non_locals = StringSet::Add(isolate, non_locals, proxy->name());
}
return non_locals;
}
@ -1759,7 +1774,7 @@ void Scope::Print(int n) {
{
bool printed_header = false;
for (Variable* local : locals_) {
if (local->mode() != TEMPORARY) continue;
if (local->mode() != VariableMode::kTemporary) continue;
if (!printed_header) {
printed_header = true;
Indent(n1, "// temporary vars:\n");
@ -1829,7 +1844,8 @@ Variable* Scope::LookupRecursive(ParseInfo* info, VariableProxy* proxy,
// variables.
// TODO(yangguo): Remove once debug-evaluate creates proper ScopeInfo for the
// scopes in which it's evaluating.
if (is_debug_evaluate_scope_) return NonLocal(proxy->raw_name(), DYNAMIC);
if (is_debug_evaluate_scope_)
return NonLocal(proxy->raw_name(), VariableMode::kDynamic);
// Try to find the variable in this scope.
Variable* var = LookupLocal(proxy->raw_name());
@ -1892,7 +1908,7 @@ Variable* Scope::LookupRecursive(ParseInfo* info, VariableProxy* proxy,
var->ForceContextAllocation();
if (proxy->is_assigned()) var->set_maybe_assigned();
}
return NonLocal(proxy->raw_name(), DYNAMIC);
return NonLocal(proxy->raw_name(), VariableMode::kDynamic);
}
if (is_declaration_scope() && AsDeclarationScope()->calls_sloppy_eval()) {
@ -1904,13 +1920,13 @@ Variable* Scope::LookupRecursive(ParseInfo* info, VariableProxy* proxy,
// here (this excludes block and catch scopes), and variable lookups at
// script scope are always dynamic.
if (var->IsGlobalObjectProperty()) {
return NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL);
return NonLocal(proxy->raw_name(), VariableMode::kDynamicGlobal);
}
if (var->is_dynamic()) return var;
Variable* invalidated = var;
var = NonLocal(proxy->raw_name(), DYNAMIC_LOCAL);
var = NonLocal(proxy->raw_name(), VariableMode::kDynamicLocal);
var->set_local_if_not_shadowed(invalidated);
}
@ -1937,11 +1953,11 @@ void SetNeedsHoleCheck(Variable* var, VariableProxy* proxy) {
}
void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
if (var->mode() == DYNAMIC_LOCAL) {
if (var->mode() == VariableMode::kDynamicLocal) {
// Dynamically introduced variables never need a hole check (since they're
// VAR bindings, either from var or function declarations), but the variable
// they shadow might need a hole check, which we want to do if we decide
// that no shadowing variable was dynamically introduced.
// VariableMode::kVar bindings, either from var or function declarations),
// but the variable they shadow might need a hole check, which we want to do
// if we decide that no shadowing variable was dynamically introduced.
DCHECK_EQ(kCreatedInitialized, var->initialization_flag());
return UpdateNeedsHoleCheck(var->local_if_not_shadowed(), proxy, scope);
}
@ -1956,12 +1972,12 @@ void UpdateNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
}
// Check if the binding really needs an initialization check. The check
// can be skipped in the following situation: we have a LET or CONST
// binding, both the Variable and the VariableProxy have the same
// declaration scope (i.e. they are both in global code, in the
// same function or in the same eval code), the VariableProxy is in
// the source physically located after the initializer of the variable,
// and that the initializer cannot be skipped due to a nonlinear scope.
// can be skipped in the following situation: we have a VariableMode::kLet or
// VariableMode::kConst binding, both the Variable and the VariableProxy have
// the same declaration scope (i.e. they are both in global code, in the same
// function or in the same eval code), the VariableProxy is in the source
// physically located after the initializer of the variable, and that the
// initializer cannot be skipped due to a nonlinear scope.
//
// The condition on the closure scopes is a conservative check for
// nested functions that access a binding and are called before the
@ -2136,7 +2152,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
//
// Temporary variables are always stack-allocated. Catch-bound variables are
// always context-allocated.
if (var->mode() == TEMPORARY) return false;
if (var->mode() == VariableMode::kTemporary) return false;
if (is_catch_scope()) return true;
if ((is_script_scope() || is_eval_scope()) &&
IsLexicalVariableMode(var->mode())) {
@ -2356,21 +2372,8 @@ void Scope::AllocateScopeInfosRecursively(Isolate* isolate,
}
}
void Scope::AllocateDebuggerScopeInfos(Isolate* isolate,
MaybeHandle<ScopeInfo> outer_scope) {
if (scope_info_.is_null()) {
scope_info_ = ScopeInfo::Create(isolate, zone(), this, outer_scope);
}
MaybeHandle<ScopeInfo> outer = NeedsContext() ? scope_info_ : outer_scope;
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
if (scope->is_function_scope()) continue;
scope->AllocateDebuggerScopeInfos(isolate, outer);
}
}
// static
void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate,
AnalyzeMode mode) {
void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate) {
DeclarationScope* scope = info->literal()->scope();
if (!scope->scope_info_.is_null()) return; // Allocated by outer function.
@ -2380,9 +2383,6 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate,
}
scope->AllocateScopeInfosRecursively(isolate, outer_scope);
if (mode == AnalyzeMode::kDebugger) {
scope->AllocateDebuggerScopeInfos(isolate, outer_scope);
}
// The debugger expects all shared function infos to contain a scope info.
// Since the top-most scope will end up in a shared function info, make sure
@ -2396,7 +2396,8 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, Isolate* isolate,
// Ensuring that the outer script scope has a scope info avoids having
// special case for native contexts vs other contexts.
if (info->script_scope() && info->script_scope()->scope_info_.is_null()) {
info->script_scope()->scope_info_ = handle(ScopeInfo::Empty(isolate));
info->script_scope()->scope_info_ =
handle(ScopeInfo::Empty(isolate), isolate);
}
}
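The other recurring pattern in this file is the move from the one-argument handle(obj) factory to the overload that takes the Isolate explicitly, which avoids the deprecated HeapObject::GetIsolate() lookup. A minimal sketch of the two forms (the variable names are illustrative):
// Before: the Isolate is recovered from the object itself (deprecated).
Handle<ScopeInfo> before = handle(raw_scope_info);
// After: the caller threads the Isolate through explicitly.
Handle<ScopeInfo> after = handle(raw_scope_info, isolate);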

View File

@ -78,8 +78,6 @@ class SloppyBlockFunctionMap : public ZoneHashMap {
int count_;
};
enum class AnalyzeMode { kRegular, kDebugger };
// Global invariants after AST construction: Each reference (i.e. identifier)
// to a JavaScript variable (including global properties) is represented by a
// VariableProxy node. Immediately after AST construction and before variable
@ -134,7 +132,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
enum class DeserializationMode { kIncludingVariables, kScopesOnly };
static Scope* DeserializeScopeChain(Zone* zone, ScopeInfo* scope_info,
static Scope* DeserializeScopeChain(Isolate* isolate, Zone* zone,
ScopeInfo* scope_info,
DeclarationScope* script_scope,
AstValueFactory* ast_value_factory,
DeserializationMode deserialization_mode);
@ -256,7 +255,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// which is an error even though the two 'e's are declared in different
// scopes.
Declaration* CheckLexDeclarationsConflictingWith(
const ZoneList<const AstRawString*>& names);
const ZonePtrList<const AstRawString>& names);
// ---------------------------------------------------------------------------
// Scope-specific info.
@ -366,7 +365,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Whether this needs to be represented by a runtime context.
bool NeedsContext() const {
// Catch scopes always have heap slots.
DCHECK(!is_catch_scope() || num_heap_slots() > 0);
DCHECK_IMPLIES(is_catch_scope(), num_heap_slots() > 0);
DCHECK_IMPLIES(is_with_scope(), num_heap_slots() > 0);
return num_heap_slots() > 0;
}
@ -646,12 +646,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Creates a script scope.
DeclarationScope(Zone* zone, AstValueFactory* ast_value_factory);
bool IsDeclaredParameter(const AstRawString* name) {
// If IsSimpleParameterList is false, duplicate parameters are not allowed,
// however `arguments` may be allowed if function is not strict code. Thus,
// the assumptions explained above do not hold.
return params_.Contains(variables_.Lookup(name));
}
bool IsDeclaredParameter(const AstRawString* name);
FunctionKind function_kind() const { return function_kind_; }
@ -812,7 +807,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// The local variable 'arguments' if we need to allocate it; nullptr
// otherwise.
Variable* arguments() const {
DCHECK(!is_arrow_scope() || arguments_ == nullptr);
DCHECK_IMPLIES(is_arrow_scope(), arguments_ == nullptr);
return arguments_;
}
@ -867,10 +862,9 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// Allocate ScopeInfos for top scope and any inner scopes that need them.
// Does nothing if ScopeInfo is already allocated.
static void AllocateScopeInfos(ParseInfo* info, Isolate* isolate,
AnalyzeMode mode);
static void AllocateScopeInfos(ParseInfo* info, Isolate* isolate);
Handle<StringSet> CollectNonLocals(ParseInfo* info,
Handle<StringSet> CollectNonLocals(Isolate* isolate, ParseInfo* info,
Handle<StringSet> non_locals);
// Determine if we can use lazy compilation for this scope.
@ -964,7 +958,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool has_inferred_function_name_ : 1;
// Parameter list in source order.
ZoneList<Variable*> params_;
ZonePtrList<Variable> params_;
// Map of function names to lists of functions defined in sloppy blocks
SloppyBlockFunctionMap* sloppy_block_function_map_;
// Convenience variable.
@ -1031,7 +1025,8 @@ class ModuleScope final : public DeclarationScope {
// The generated ModuleDescriptor does not preserve all information. In
// particular, its module_requests map will be empty because we no longer need
// the map after parsing.
ModuleScope(Handle<ScopeInfo> scope_info, AstValueFactory* ast_value_factory);
ModuleScope(Isolate* isolate, Handle<ScopeInfo> scope_info,
AstValueFactory* ast_value_factory);
ModuleDescriptor* module() const {
DCHECK_NOT_NULL(module_descriptor_);

View File

@ -26,7 +26,7 @@ Variable::Variable(Variable* other)
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
return (IsDynamicVariableMode(mode()) || mode() == VAR) &&
return (IsDynamicVariableMode(mode()) || mode() == VariableMode::kVar) &&
scope_ != nullptr && scope_->is_script_scope();
}

View File

@ -36,7 +36,8 @@ class Variable final : public ZoneObject {
LocationField::encode(VariableLocation::UNALLOCATED) |
VariableKindField::encode(kind)) {
// Var declared variables never need initialization.
DCHECK(!(mode == VAR && initialization_flag == kNeedsInitialization));
DCHECK(!(mode == VariableMode::kVar &&
initialization_flag == kNeedsInitialization));
}
explicit Variable(Variable* other);
@ -137,7 +138,8 @@ class Variable final : public ZoneObject {
}
Variable* local_if_not_shadowed() const {
DCHECK(mode() == DYNAMIC_LOCAL && local_if_not_shadowed_ != nullptr);
DCHECK(mode() == VariableMode::kDynamicLocal &&
local_if_not_shadowed_ != nullptr);
return local_if_not_shadowed_;
}
@ -175,7 +177,8 @@ class Variable final : public ZoneObject {
static InitializationFlag DefaultInitializationFlag(VariableMode mode) {
DCHECK(IsDeclaredVariableMode(mode));
return mode == VAR ? kCreatedInitialized : kNeedsInitialization;
return mode == VariableMode::kVar ? kCreatedInitialized
: kNeedsInitialization;
}
typedef ThreadedList<Variable> List;

259
deps/v8/src/async-hooks-wrapper.cc vendored Normal file
View File

@ -0,0 +1,259 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/async-hooks-wrapper.h"
#include "src/d8.h"
namespace v8 {
void AsyncHooksWrap::Enable() { enabled_ = true; }
void AsyncHooksWrap::Disable() { enabled_ = false; }
v8::Local<v8::Function> AsyncHooksWrap::init_function() const {
return init_function_.Get(isolate_);
}
void AsyncHooksWrap::set_init_function(v8::Local<v8::Function> value) {
init_function_.Reset(isolate_, value);
}
v8::Local<v8::Function> AsyncHooksWrap::before_function() const {
return before_function_.Get(isolate_);
}
void AsyncHooksWrap::set_before_function(v8::Local<v8::Function> value) {
before_function_.Reset(isolate_, value);
}
v8::Local<v8::Function> AsyncHooksWrap::after_function() const {
return after_function_.Get(isolate_);
}
void AsyncHooksWrap::set_after_function(v8::Local<v8::Function> value) {
after_function_.Reset(isolate_, value);
}
v8::Local<v8::Function> AsyncHooksWrap::promiseResolve_function() const {
return promiseResolve_function_.Get(isolate_);
}
void AsyncHooksWrap::set_promiseResolve_function(
v8::Local<v8::Function> value) {
promiseResolve_function_.Reset(isolate_, value);
}
static AsyncHooksWrap* UnwrapHook(
const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
HandleScope scope(isolate);
Local<Object> hook = args.This();
Local<External> wrap = Local<External>::Cast(hook->GetInternalField(0));
void* ptr = wrap->Value();
return static_cast<AsyncHooksWrap*>(ptr);
}
static void EnableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
AsyncHooksWrap* wrap = UnwrapHook(args);
wrap->Enable();
}
static void DisableHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
AsyncHooksWrap* wrap = UnwrapHook(args);
wrap->Disable();
}
async_id_t AsyncHooks::GetExecutionAsyncId() const {
return asyncContexts.top().execution_async_id;
}
async_id_t AsyncHooks::GetTriggerAsyncId() const {
return asyncContexts.top().trigger_async_id;
}
Local<Object> AsyncHooks::CreateHook(
const v8::FunctionCallbackInfo<v8::Value>& args) {
Isolate* isolate = args.GetIsolate();
EscapableHandleScope handle_scope(isolate);
Local<Context> currentContext = isolate->GetCurrentContext();
if (args.Length() != 1 || !args[0]->IsObject()) {
isolate->ThrowException(
String::NewFromUtf8(isolate, "Invalid arguments passed to createHook",
NewStringType::kNormal)
.ToLocalChecked());
return Local<Object>();
}
AsyncHooksWrap* wrap = new AsyncHooksWrap(isolate);
Local<Object> fn_obj = args[0].As<Object>();
#define SET_HOOK_FN(name) \
Local<Value> name##_v = \
fn_obj \
->Get(currentContext, \
String::NewFromUtf8(isolate, #name, NewStringType::kNormal) \
.ToLocalChecked()) \
.ToLocalChecked(); \
if (name##_v->IsFunction()) { \
wrap->set_##name##_function(name##_v.As<Function>()); \
}
SET_HOOK_FN(init);
SET_HOOK_FN(before);
SET_HOOK_FN(after);
SET_HOOK_FN(promiseResolve);
#undef SET_HOOK_FN
async_wraps_.push_back(wrap);
Local<Object> obj = async_hooks_templ.Get(isolate)
->NewInstance(currentContext)
.ToLocalChecked();
obj->SetInternalField(0, External::New(isolate, wrap));
return handle_scope.Escape(obj);
}
void AsyncHooks::ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
Local<Value> parent) {
AsyncHooks* hooks =
PerIsolateData::Get(promise->GetIsolate())->GetAsyncHooks();
HandleScope handle_scope(hooks->isolate_);
Local<Context> currentContext = hooks->isolate_->GetCurrentContext();
if (type == PromiseHookType::kInit) {
++hooks->current_async_id;
Local<Integer> async_id =
Integer::New(hooks->isolate_, hooks->current_async_id);
promise->SetPrivate(currentContext,
hooks->async_id_smb.Get(hooks->isolate_), async_id);
if (parent->IsPromise()) {
Local<Promise> parent_promise = parent.As<Promise>();
Local<Value> parent_async_id =
parent_promise
->GetPrivate(hooks->isolate_->GetCurrentContext(),
hooks->async_id_smb.Get(hooks->isolate_))
.ToLocalChecked();
promise->SetPrivate(currentContext,
hooks->trigger_id_smb.Get(hooks->isolate_),
parent_async_id);
} else {
CHECK(parent->IsUndefined());
Local<Integer> trigger_id = Integer::New(hooks->isolate_, 0);
promise->SetPrivate(currentContext,
hooks->trigger_id_smb.Get(hooks->isolate_),
trigger_id);
}
} else if (type == PromiseHookType::kBefore) {
AsyncContext ctx;
ctx.execution_async_id =
promise
->GetPrivate(hooks->isolate_->GetCurrentContext(),
hooks->async_id_smb.Get(hooks->isolate_))
.ToLocalChecked()
.As<Integer>()
->Value();
ctx.trigger_async_id =
promise
->GetPrivate(hooks->isolate_->GetCurrentContext(),
hooks->trigger_id_smb.Get(hooks->isolate_))
.ToLocalChecked()
.As<Integer>()
->Value();
hooks->asyncContexts.push(ctx);
} else if (type == PromiseHookType::kAfter) {
hooks->asyncContexts.pop();
}
for (AsyncHooksWrap* wrap : hooks->async_wraps_) {
PromiseHookDispatch(type, promise, parent, wrap, hooks);
}
}
void AsyncHooks::Initialize() {
HandleScope handle_scope(isolate_);
async_hook_ctor.Reset(isolate_, FunctionTemplate::New(isolate_));
async_hook_ctor.Get(isolate_)->SetClassName(
String::NewFromUtf8(isolate_, "AsyncHook", NewStringType::kNormal)
.ToLocalChecked());
async_hooks_templ.Reset(isolate_,
async_hook_ctor.Get(isolate_)->InstanceTemplate());
async_hooks_templ.Get(isolate_)->SetInternalFieldCount(1);
async_hooks_templ.Get(isolate_)->Set(
String::NewFromUtf8(isolate_, "enable"),
FunctionTemplate::New(isolate_, EnableHook));
async_hooks_templ.Get(isolate_)->Set(
String::NewFromUtf8(isolate_, "disable"),
FunctionTemplate::New(isolate_, DisableHook));
async_id_smb.Reset(isolate_, Private::New(isolate_));
trigger_id_smb.Reset(isolate_, Private::New(isolate_));
isolate_->SetPromiseHook(ShellPromiseHook);
}
void AsyncHooks::Deinitialize() {
isolate_->SetPromiseHook(nullptr);
for (AsyncHooksWrap* wrap : async_wraps_) {
delete wrap;
}
}
void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
Local<Promise> promise,
Local<Value> parent, AsyncHooksWrap* wrap,
AsyncHooks* hooks) {
if (!wrap->IsEnabled()) {
return;
}
HandleScope handle_scope(hooks->isolate_);
TryCatch try_catch(hooks->isolate_);
try_catch.SetVerbose(true);
Local<Value> rcv = Undefined(hooks->isolate_);
Local<Value> async_id =
promise
->GetPrivate(hooks->isolate_->GetCurrentContext(),
hooks->async_id_smb.Get(hooks->isolate_))
.ToLocalChecked();
Local<Value> args[1] = {async_id};
// Sacrifice brevity for readability and ease of debugging
if (type == PromiseHookType::kInit) {
if (!wrap->init_function().IsEmpty()) {
Local<Value> initArgs[4] = {
async_id,
String::NewFromUtf8(hooks->isolate_, "PROMISE",
NewStringType::kNormal)
.ToLocalChecked(),
promise
->GetPrivate(hooks->isolate_->GetCurrentContext(),
hooks->trigger_id_smb.Get(hooks->isolate_))
.ToLocalChecked(),
promise};
wrap->init_function()->Call(rcv, 4, initArgs);
}
} else if (type == PromiseHookType::kBefore) {
if (!wrap->before_function().IsEmpty()) {
wrap->before_function()->Call(rcv, 1, args);
}
} else if (type == PromiseHookType::kAfter) {
if (!wrap->after_function().IsEmpty()) {
wrap->after_function()->Call(rcv, 1, args);
}
} else if (type == PromiseHookType::kResolve) {
if (!wrap->promiseResolve_function().IsEmpty()) {
wrap->promiseResolve_function()->Call(rcv, 1, args);
}
}
if (try_catch.HasCaught()) {
Shell::ReportException(hooks->isolate_, &try_catch);
}
}
} // namespace v8
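A minimal sketch of how d8 might wire CreateHook up to a script-visible createHook() function; the callback name and registration point are assumptions, while PerIsolateData::Get and GetAsyncHooks are the accessors already used in ShellPromiseHook above:
// Hypothetical glue code, not part of this change.
static void AsyncHooksCreateHook(const v8::FunctionCallbackInfo<v8::Value>& args) {
  v8::Isolate* isolate = args.GetIsolate();
  AsyncHooks* hooks = PerIsolateData::Get(isolate)->GetAsyncHooks();
  // Returns the AsyncHook wrapper object carrying enable()/disable().
  args.GetReturnValue().Set(hooks->CreateHook(args));
}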

95
deps/v8/src/async-hooks-wrapper.h vendored Normal file
View File

@ -0,0 +1,95 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ASYNC_HOOKS_WRAPPER_H_
#define V8_ASYNC_HOOKS_WRAPPER_H_
#include <stack>
#include "include/v8.h"
#include "src/objects.h"
namespace v8 {
typedef double async_id_t;
struct AsyncContext {
async_id_t execution_async_id;
async_id_t trigger_async_id;
};
class AsyncHooksWrap {
public:
explicit AsyncHooksWrap(Isolate* isolate) {
enabled_ = false;
isolate_ = isolate;
}
void Enable();
void Disable();
bool IsEnabled() const { return enabled_; }
inline v8::Local<v8::Function> init_function() const;
inline void set_init_function(v8::Local<v8::Function> value);
inline v8::Local<v8::Function> before_function() const;
inline void set_before_function(v8::Local<v8::Function> value);
inline v8::Local<v8::Function> after_function() const;
inline void set_after_function(v8::Local<v8::Function> value);
inline v8::Local<v8::Function> promiseResolve_function() const;
inline void set_promiseResolve_function(v8::Local<v8::Function> value);
private:
Isolate* isolate_;
Persistent<v8::Function> init_function_;
Persistent<v8::Function> before_function_;
Persistent<v8::Function> after_function_;
Persistent<v8::Function> promiseResolve_function_;
bool enabled_;
};
class AsyncHooks {
public:
explicit AsyncHooks(Isolate* isolate) {
isolate_ = isolate;
AsyncContext ctx;
ctx.execution_async_id = 1;
ctx.trigger_async_id = 0;
asyncContexts.push(ctx);
current_async_id = 1;
Initialize();
}
~AsyncHooks() { Deinitialize(); }
async_id_t GetExecutionAsyncId() const;
async_id_t GetTriggerAsyncId() const;
Local<Object> CreateHook(const v8::FunctionCallbackInfo<v8::Value>& args);
private:
std::vector<AsyncHooksWrap*> async_wraps_;
Isolate* isolate_;
Persistent<FunctionTemplate> async_hook_ctor;
Persistent<ObjectTemplate> async_hooks_templ;
Persistent<Private> async_id_smb;
Persistent<Private> trigger_id_smb;
void Initialize();
void Deinitialize();
static void ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
Local<Value> parent);
static void PromiseHookDispatch(PromiseHookType type, Local<Promise> promise,
Local<Value> parent, AsyncHooksWrap* wrap,
AsyncHooks* hooks);
std::stack<AsyncContext> asyncContexts;
async_id_t current_async_id;
};
} // namespace v8
#endif // V8_ASYNC_HOOKS_WRAPPER_H_

View File

@ -31,7 +31,8 @@ namespace internal {
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidJumpTableIndex, "Invalid jump table index") \
V(kInvalidRegisterFileInGenerator, "invalid register file in generator") \
V(kInvalidParametersAndRegistersInGenerator, \
"invalid parameters and registers in generator") \
V(kInvalidSharedFunctionInfoData, "Invalid SharedFunctionInfo data") \
V(kMissingBytecodeArray, "Missing bytecode array from function") \
V(kObjectNotTagged, "The object is not tagged") \

View File

@ -15,46 +15,6 @@ namespace v8 {
namespace base {
// Deprecated. Use std::atomic<T> for new code.
template <class T>
class AtomicNumber {
public:
AtomicNumber() : value_(0) {}
explicit AtomicNumber(T initial) : value_(initial) {}
// Returns the value after incrementing.
V8_INLINE T Increment(T increment) {
return static_cast<T>(base::Barrier_AtomicIncrement(
&value_, static_cast<base::AtomicWord>(increment)));
}
// Returns the value after decrementing.
V8_INLINE T Decrement(T decrement) {
return static_cast<T>(base::Barrier_AtomicIncrement(
&value_, -static_cast<base::AtomicWord>(decrement)));
}
V8_INLINE T Value() const {
return static_cast<T>(base::Acquire_Load(&value_));
}
V8_INLINE void SetValue(T new_value) {
base::Release_Store(&value_, static_cast<base::AtomicWord>(new_value));
}
V8_INLINE T operator=(T value) {
SetValue(value);
return value;
}
V8_INLINE T operator+=(T value) { return Increment(value); }
V8_INLINE T operator-=(T value) { return Decrement(value); }
private:
STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
base::AtomicWord value_;
};
// Flag using T atomically. Also accepts void* as T.
template <typename T>
class AtomicValue {

View File

@ -27,15 +27,15 @@ class Flags final {
typedef T flag_type;
typedef S mask_type;
Flags() : mask_(0) {}
Flags(flag_type flag) // NOLINT(runtime/explicit)
constexpr Flags() : mask_(0) {}
constexpr Flags(flag_type flag) // NOLINT(runtime/explicit)
: mask_(static_cast<S>(flag)) {}
explicit Flags(mask_type mask) : mask_(static_cast<S>(mask)) {}
constexpr explicit Flags(mask_type mask) : mask_(static_cast<S>(mask)) {}
bool operator==(flag_type flag) const {
constexpr bool operator==(flag_type flag) const {
return mask_ == static_cast<S>(flag);
}
bool operator!=(flag_type flag) const {
constexpr bool operator!=(flag_type flag) const {
return mask_ != static_cast<S>(flag);
}
@ -52,22 +52,34 @@ class Flags final {
return *this;
}
Flags operator&(const Flags& flags) const { return Flags(*this) &= flags; }
Flags operator|(const Flags& flags) const { return Flags(*this) |= flags; }
Flags operator^(const Flags& flags) const { return Flags(*this) ^= flags; }
constexpr Flags operator&(const Flags& flags) const {
return Flags(*this) &= flags;
}
constexpr Flags operator|(const Flags& flags) const {
return Flags(*this) |= flags;
}
constexpr Flags operator^(const Flags& flags) const {
return Flags(*this) ^= flags;
}
Flags& operator&=(flag_type flag) { return operator&=(Flags(flag)); }
Flags& operator|=(flag_type flag) { return operator|=(Flags(flag)); }
Flags& operator^=(flag_type flag) { return operator^=(Flags(flag)); }
Flags operator&(flag_type flag) const { return operator&(Flags(flag)); }
Flags operator|(flag_type flag) const { return operator|(Flags(flag)); }
Flags operator^(flag_type flag) const { return operator^(Flags(flag)); }
constexpr Flags operator&(flag_type flag) const {
return operator&(Flags(flag));
}
constexpr Flags operator|(flag_type flag) const {
return operator|(Flags(flag));
}
constexpr Flags operator^(flag_type flag) const {
return operator^(Flags(flag));
}
Flags operator~() const { return Flags(~mask_); }
constexpr Flags operator~() const { return Flags(~mask_); }
operator mask_type() const { return mask_; }
bool operator!() const { return !mask_; }
constexpr operator mask_type() const { return mask_; }
constexpr bool operator!() const { return !mask_; }
friend size_t hash_value(const Flags& flags) { return flags.mask_; }
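With these constructors and comparison operators now constexpr, a Flags value can be built and checked in a constant expression. A small illustrative sketch (the Permission enum and its values are invented for this example):
enum class Permission : int { kRead = 1 << 0, kWrite = 1 << 1 };
using Permissions = v8::base::Flags<Permission>;
// Constructed and compared entirely at compile time.
constexpr Permissions kReadOnly(Permission::kRead);
static_assert(kReadOnly == Permission::kRead, "flag construction is constexpr");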

136
deps/v8/src/base/list.h vendored Normal file
View File

@ -0,0 +1,136 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_LIST_H_
#define V8_BASE_LIST_H_
#include <atomic>
#include "src/base/logging.h"
namespace v8 {
namespace base {
template <class T>
class List {
public:
List() : front_(nullptr), back_(nullptr) {}
void PushBack(T* element) {
DCHECK(!element->list_node().next());
DCHECK(!element->list_node().prev());
if (back_) {
DCHECK(front_);
InsertAfter(element, back_);
} else {
AddFirstElement(element);
}
}
void PushFront(T* element) {
DCHECK(!element->list_node().next());
DCHECK(!element->list_node().prev());
if (front_) {
DCHECK(back_);
InsertBefore(element, front_);
} else {
AddFirstElement(element);
}
}
void Remove(T* element) {
DCHECK(Contains(element));
if (back_ == element) {
back_ = element->list_node().prev();
}
if (front_ == element) {
front_ = element->list_node().next();
}
T* next = element->list_node().next();
T* prev = element->list_node().prev();
if (next) next->list_node().set_prev(prev);
if (prev) prev->list_node().set_next(next);
element->list_node().set_prev(nullptr);
element->list_node().set_next(nullptr);
}
bool Contains(T* element) {
T* it = front_;
while (it) {
if (it == element) return true;
it = it->list_node().next();
}
return false;
}
bool Empty() { return !front_ && !back_; }
T* front() { return front_; }
T* back() { return back_; }
private:
void AddFirstElement(T* element) {
DCHECK(!back_);
DCHECK(!front_);
DCHECK(!element->list_node().next());
DCHECK(!element->list_node().prev());
element->list_node().set_prev(nullptr);
element->list_node().set_next(nullptr);
front_ = element;
back_ = element;
}
void InsertAfter(T* element, T* other) {
T* other_next = other->list_node().next();
element->list_node().set_next(other_next);
element->list_node().set_prev(other);
other->list_node().set_next(element);
if (other_next)
other_next->list_node().set_prev(element);
else
back_ = element;
}
void InsertBefore(T* element, T* other) {
T* other_prev = other->list_node().prev();
element->list_node().set_next(other);
element->list_node().set_prev(other_prev);
other->list_node().set_prev(element);
if (other_prev) {
other_prev->list_node().set_next(element);
} else {
front_ = element;
}
}
T* front_;
T* back_;
};
template <class T>
class ListNode {
public:
ListNode() { Initialize(); }
T* next() { return next_; }
T* prev() { return prev_; }
void Initialize() {
next_ = nullptr;
prev_ = nullptr;
}
private:
void set_next(T* next) { next_ = next; }
void set_prev(T* prev) { prev_ = prev; }
T* next_;
T* prev_;
friend class List<T>;
};
} // namespace base
} // namespace v8
#endif // V8_BASE_LIST_H_
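A minimal usage sketch (the Page type is illustrative): the only requirement List places on its element type is a list_node() accessor returning the embedded ListNode<T>.
struct Page {
  v8::base::ListNode<Page>& list_node() { return node_; }
  v8::base::ListNode<Page> node_;
};

void Example() {
  v8::base::List<Page> pages;
  Page a, b;
  pages.PushBack(&a);   // a becomes both front and back.
  pages.PushFront(&b);  // b becomes the new front.
  pages.Remove(&a);     // unlinks a; b remains as front and back.
}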

View File

@ -145,10 +145,6 @@ V8_INLINE Dest bit_cast(Source const& source) {
void operator delete(void*, size_t) { base::OS::Abort(); } \
void operator delete[](void*, size_t) { base::OS::Abort(); }
// Newly written code should use V8_INLINE and V8_NOINLINE directly.
#define INLINE(declarator) V8_INLINE declarator
#define NO_INLINE(declarator) V8_NOINLINE declarator
// Define V8_USE_ADDRESS_SANITIZER macro.
#if defined(__has_feature)
#if __has_feature(address_sanitizer)

View File

@ -156,6 +156,8 @@ int ReclaimInaccessibleMemory(void* address, size_t size) {
#else
int ret = madvise(address, size, MADV_FREE);
#endif
if (ret != 0 && errno == ENOSYS)
return 0; // madvise is not available on all systems.
if (ret != 0 && errno == EINVAL) {
// MADV_FREE only works on Linux 4.5+ . If request failed, retry with older
// MADV_DONTNEED . Note that MADV_FREE being defined at compile time doesn't

View File

@ -48,7 +48,7 @@ namespace base {
#define V8_FAST_TLS_SUPPORTED 1
INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
V8_INLINE intptr_t InternalGetExistingThreadLocal(intptr_t index);
inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
const intptr_t kTibInlineTlsOffset = 0xE10;
@ -74,7 +74,7 @@ inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
extern V8_BASE_EXPORT intptr_t kMacTlsBaseOffset;
INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
V8_INLINE intptr_t InternalGetExistingThreadLocal(intptr_t index);
inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
intptr_t result;

View File

@ -86,6 +86,26 @@ V8_INLINE int64_t ClockNow(clockid_t clk_id) {
return 0;
#endif
}
V8_INLINE bool IsHighResolutionTimer(clockid_t clk_id) {
// Limit duration of timer resolution measurement to 100 ms. If we cannot
// measure timer resolution within this time, we assume a low resolution
// timer.
int64_t end =
ClockNow(clk_id) + 100 * v8::base::Time::kMicrosecondsPerMillisecond;
int64_t start, delta;
do {
start = ClockNow(clk_id);
// Loop until we can detect that the clock has changed. Non-HighRes timers
// will increment in chunks, e.g. 15ms. By spinning until we see a clock
// change, we detect the minimum time between measurements.
do {
delta = ClockNow(clk_id) - start;
} while (delta == 0);
} while (delta > 1 && start < end);
return delta <= 1;
}
#elif V8_OS_WIN
V8_INLINE bool IsQPCReliable() {
v8::base::CPU cpu;
@ -735,7 +755,16 @@ TimeTicks TimeTicks::Now() {
}
// static
bool TimeTicks::IsHighResolution() { return true; }
bool TimeTicks::IsHighResolution() {
#if V8_OS_MACOSX
return true;
#elif V8_OS_POSIX
static bool is_high_resolution = IsHighResolutionTimer(CLOCK_MONOTONIC);
return is_high_resolution;
#else
return true;
#endif
}
#endif // V8_OS_WIN
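A small sketch of how a caller might consult the new capability check before trusting sub-millisecond deltas (the function itself is hypothetical and assumes src/base/platform/time.h is included):
int64_t ElapsedMicrosOrMinusOne() {
  // Only report fine-grained deltas when the monotonic clock is high resolution.
  if (!v8::base::TimeTicks::IsHighResolution()) return -1;
  v8::base::TimeTicks start = v8::base::TimeTicks::Now();
  // ... timed work would go here ...
  return (v8::base::TimeTicks::Now() - start).InMicroseconds();
}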

File diff suppressed because it is too large Load Diff

View File

@ -29,9 +29,11 @@ class SourceCodeCache final BASE_EMBEDDED {
bit_cast<Object**, FixedArray**>(&cache_));
}
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle);
bool Lookup(Isolate* isolate, Vector<const char> name,
Handle<SharedFunctionInfo>* handle);
void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared);
void Add(Isolate* isolate, Vector<const char> name,
Handle<SharedFunctionInfo> shared);
private:
Script::Type type_;

View File

@ -28,7 +28,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExternalReference::Create(address).address()) &
1);
#endif
__ Move(r5, ExternalReference::Create(address));
__ Move(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@ -39,57 +39,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
namespace {
void AdaptorWithExitFrameType(MacroAssembler* masm,
Builtins::ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- r0 : number of arguments excluding receiver
// -- r1 : target
// -- r3 : new.target
// -- r5 : entry point
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
__ AssertFunction(r1);
// Make sure we operate in the context of the called function (for example
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// CEntry expects r0 to contain the number of arguments including the
// receiver and the extra arguments.
__ add(r0, r0, Operand(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
// Insert extra arguments.
__ PushRoot(Heap::kTheHoleValueRootIndex); // Padding.
__ SmiTag(r0);
__ Push(r0, r1, r3);
__ SmiUntag(r0);
// Jump to the C entry runtime stub directly here instead of using
// JumpToExternalReference. We have already loaded entry point to r5
// in Generate_adaptor.
__ mov(r1, r5);
Handle<Code> code =
CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
exit_frame_type == Builtins::BUILTIN_EXIT);
__ Jump(code, RelocInfo::CODE_TARGET);
}
} // namespace
void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, EXIT);
}
void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@ -111,39 +60,8 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// function.
// tail call a stub
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : array function
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ ldr(r7, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ SmiTst(r7);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r7, r8, r9, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// r2 is the AllocationSite - here undefined.
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
// If r3 (new target) is undefined, then this is the 'Call' case, so move
// r1 (the constructor) to r3.
__ cmp(r3, r2);
__ mov(r3, r1, LeaveCC, eq);
// Run the native code for the Array function called as a normal function.
// tail call a stub
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@ -494,20 +412,25 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- sp[0] : generator receiver
// -----------------------------------
// Push holes for arguments to generator function. Since the parser forced
// context allocation for any variables in generators, the actual argument
// values have already been copied into the context and these dummy values
// will never be used.
// Copy the function arguments from the generator object's register file.
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ ldrh(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ ldr(r2,
FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
__ mov(r6, Operand(0));
__ bind(&loop);
__ sub(r3, r3, Operand(1), SetCC);
__ b(mi, &done_loop);
__ PushRoot(Heap::kTheHoleValueRootIndex);
__ cmp(r6, r3);
__ b(ge, &done_loop);
__ add(scratch, r2, Operand(r6, LSL, kPointerSizeLog2));
__ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize));
__ Push(scratch);
__ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done_loop);
}
@ -523,8 +446,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ ldr(r0, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(
r0, SharedFunctionInfo::kFormalParameterCountOffset));
__ ldrh(r0, FieldMemOperand(
r0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@ -906,17 +829,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
__ ldr(r0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, r4);
__ ldr(r4, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
__ SmiTst(r4);
__ b(ne, &maybe_load_debug_bytecode_array);
__ bind(&bytecode_array_loaded);
// Increment invocation count for the function.
__ ldr(r9, FieldMemOperand(feedback_vector,
@ -1027,37 +945,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in r0.
LeaveInterpreterFrame(masm, r2);
__ Jump(lr);
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ bind(&maybe_load_debug_bytecode_array);
__ ldr(r9, FieldMemOperand(r4, DebugInfo::kDebugBytecodeArrayOffset), ne);
__ JumpIfRoot(r9, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
__ mov(kInterpreterBytecodeArrayRegister, r9);
__ ldr(r9, FieldMemOperand(r4, DebugInfo::kFlagsOffset));
__ SmiUntag(r9);
__ And(r9, r9, Operand(DebugInfo::kDebugExecutionMode));
ExternalReference debug_execution_mode =
ExternalReference::debug_execution_mode_address(masm->isolate());
__ mov(r4, Operand(debug_execution_mode));
__ ldrsb(r4, MemOperand(r4));
STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
static_cast<int>(DebugInfo::kSideEffects));
__ cmp(r4, r9);
__ b(eq, &bytecode_array_loaded);
__ push(closure);
__ push(feedback_vector);
__ push(kInterpreterBytecodeArrayRegister);
__ push(closure);
__ CallRuntime(Runtime::kDebugApplyInstrumentation);
__ pop(kInterpreterBytecodeArrayRegister);
__ pop(feedback_vector);
__ pop(closure);
__ b(&bytecode_array_loaded);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@ -1160,8 +1047,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
ArrayConstructorStub array_constructor_stub(masm->isolate());
__ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
__ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r0, r1, and r3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@ -1278,208 +1165,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
// Set the code slot inside the JSFunction to CompileLazy.
__ Move(r2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ str(r2, FieldMemOperand(r1, JSFunction::kCodeOffset));
__ RecordWriteField(r1, JSFunction::kCodeOffset, r2, r4, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Jump to compile lazy.
Generate_CompileLazy(masm);
}
static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
Register scratch1) {
// Figure out the SFI's code object.
Label done;
Label check_is_bytecode_array;
Label check_is_exported_function_data;
Label check_is_fixed_array;
Label check_is_pre_parsed_scope_data;
Label check_is_function_template_info;
Label check_is_interpreter_data;
Register data_type = scratch1;
// IsSmi: Is builtin
__ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
__ Move(scratch1, ExternalReference::builtins_address(masm->isolate()));
__ ldr(sfi_data, MemOperand::PointerAddressFromSmiKey(scratch1, sfi_data));
__ b(&done);
// Get map for subsequent checks.
__ bind(&check_is_bytecode_array);
__ ldr(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
__ ldrh(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
// IsBytecodeArray: Interpret bytecode
__ cmp(data_type, Operand(BYTECODE_ARRAY_TYPE));
__ b(ne, &check_is_exported_function_data);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
__ b(&done);
// IsWasmExportedFunctionData: Use the wrapper code
__ bind(&check_is_exported_function_data);
__ cmp(data_type, Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
__ b(ne, &check_is_fixed_array);
__ ldr(sfi_data, FieldMemOperand(
sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
__ b(&done);
// IsFixedArray: Instantiate using AsmWasmData
__ bind(&check_is_fixed_array);
__ cmp(data_type, Operand(FIXED_ARRAY_TYPE));
__ b(ne, &check_is_pre_parsed_scope_data);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
__ b(&done);
// IsPreParsedScopeData: Compile lazy
__ bind(&check_is_pre_parsed_scope_data);
__ cmp(data_type, Operand(TUPLE2_TYPE));
__ b(ne, &check_is_function_template_info);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ b(&done);
// IsFunctionTemplateInfo: API call
__ bind(&check_is_function_template_info);
__ cmp(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
__ b(ne, &check_is_interpreter_data);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
__ b(&done);
// IsInterpreterData: Interpret bytecode
__ bind(&check_is_interpreter_data);
if (FLAG_debug_code) {
__ cmp(data_type, Operand(INTERPRETER_DATA_TYPE));
__ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
}
__ ldr(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
__ bind(&done);
}
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
// -- r3 : new target (preserved for callee)
// -- r1 : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Register closure = r1;
Register feedback_vector = r2;
// Do we have a valid feedback vector?
__ ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, r4, r6, r5);
// We found no optimized code. Infer the code object needed for the SFI.
Register entry = r4;
__ ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(entry,
FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoCode(masm, entry, r5);
// If code entry points to anything other than CompileLazy, install that.
__ Move(r5, masm->CodeObject());
__ cmp(entry, r5);
__ b(eq, &gotta_call_runtime);
// Install the SFI's code entry.
__ str(entry, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ mov(r9, entry); // Write barrier clobbers r9 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, r9, r5,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(entry);
__ bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
// -- r3 : new target (preserved for callee)
// -- r1 : target function (preserved for callee)
// -----------------------------------
Label deserialize_in_runtime;
Register target = r1; // Must be preserved
Register scratch0 = r2;
Register scratch1 = r4;
CHECK(scratch0 != r0 && scratch0 != r3 && scratch0 != r1);
CHECK(scratch1 != r0 && scratch1 != r3 && scratch1 != r1);
CHECK(scratch0 != scratch1);
// Load the builtin id for lazy deserialization from SharedFunctionInfo.
__ AssertFunction(target);
__ ldr(scratch0,
FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
__ ldr(scratch1,
FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
__ AssertSmi(scratch1);
// The builtin may already have been deserialized. If that is the case, it is
// stored in the builtins table, and we can copy to correct code object to
// both the shared function info and function without calling into runtime.
//
// Otherwise, we need to call into runtime to deserialize.
{
// Load the code object at builtins_table[builtin_id] into scratch1.
__ SmiUntag(scratch1);
__ Move(scratch0, ExternalReference::builtins_address(masm->isolate()));
__ ldr(scratch1, MemOperand(scratch0, scratch1, LSL, kPointerSizeLog2));
// Check if the loaded code object has already been deserialized. This is
// the case iff it does not equal DeserializeLazy.
__ Move(scratch0, masm->CodeObject());
__ cmp(scratch1, scratch0);
__ b(eq, &deserialize_in_runtime);
}
{
// If we've reached this spot, the target builtin has been deserialized and
// we simply need to copy it over to the target function.
Register target_builtin = scratch1;
__ str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ mov(r9, target_builtin); // Write barrier clobbers r9 below.
__ RecordWriteField(target, JSFunction::kCodeOffset, r9, r5,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// All copying is done. Jump to the deserialized code object.
__ add(target_builtin, target_builtin,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(target_builtin);
}
__ bind(&deserialize_in_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argument count (preserved for callee)
@ -1892,10 +1577,27 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- r4 : len (number of elements to push from args)
// -- r3 : new.target (for [[Construct]])
// -----------------------------------
__ AssertFixedArray(r2);
Register scratch = r8;
if (masm->emit_debug_code()) {
// Allow r2 to be a FixedArray, or a FixedDoubleArray if r4 == 0.
Label ok, fail;
__ AssertNotSmi(r2);
__ ldr(scratch, FieldMemOperand(r2, HeapObject::kMapOffset));
__ ldrh(r6, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ cmp(r6, Operand(FIXED_ARRAY_TYPE));
__ b(eq, &ok);
__ cmp(r6, Operand(FIXED_DOUBLE_ARRAY_TYPE));
__ b(ne, &fail);
__ cmp(r4, Operand(0));
__ b(eq, &ok);
// Fall through.
__ bind(&fail);
__ Abort(AbortReason::kOperandIsNotAFixedArray);
__ bind(&ok);
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
@ -1977,8 +1679,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
{
__ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r5, FieldMemOperand(
r5, SharedFunctionInfo::kFormalParameterCountOffset));
__ ldrh(r5, FieldMemOperand(
r5, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r4, fp);
}
__ b(&arguments_done);
@ -2108,8 +1810,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
__ ldr(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
__ ldrh(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(r0);
ParameterCount expected(r2);
__ InvokeFunctionCode(r1, no_reg, expected, actual, JUMP_FUNCTION);
@ -2378,42 +2080,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
__ SmiTag(r1);
__ Push(r1);
__ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
__ SmiTag(r1);
__ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(r1, r2);
__ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
// static
void Builtins::Generate_Abort(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : message_id as Smi
// -- lr : return address
// -----------------------------------
__ Push(r1);
__ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : actual number of arguments
@ -2425,10 +2091,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
Label invoke, dont_adapt_arguments, stack_overflow;
Label enough, too_few;
__ cmp(r0, r2);
__ b(lt, &too_few);
__ cmp(r2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
__ b(eq, &dont_adapt_arguments);
__ cmp(r0, r2);
__ b(lt, &too_few);
Register scratch = r5;
@ -2547,29 +2213,36 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in r4 by the jump table trampoline.
// Convert to Smi for the runtime call.
__ SmiTag(r4, r4);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
FrameAndConstantPoolScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
constexpr RegList gp_regs = Register::ListOf<r0, r1, r2>();
constexpr RegList gp_regs = Register::ListOf<r0, r1, r2, r3>();
constexpr DwVfpRegister lowest_fp_reg = d0;
constexpr DwVfpRegister highest_fp_reg = d7;
__ stm(db_w, sp, gp_regs);
__ vstm(db_w, sp, lowest_fp_reg, highest_fp_reg);
// Pass the WASM instance as an explicit argument to WasmCompileLazy.
// Pass instance and function index as explicit arguments to the runtime
// function.
__ push(kWasmInstanceRegister);
__ push(r4);
// Load the correct CEntry builtin from the instance object.
__ ldr(r2, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(cp, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
// The entrypoint address is the first return value.
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, r2);
// The entrypoint address is the return value.
__ mov(r8, kReturnRegister0);
// The WASM instance is the second return value.
__ mov(kWasmInstanceRegister, kReturnRegister1);
// Restore registers.
__ vldm(ia_w, sp, lowest_fp_reg, highest_fp_reg);
@ -2741,6 +2414,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
Label negate, done;
TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
UseScratchRegisterScope temps(masm);
Register result_reg = r7;
Register double_low = GetRegisterThatIsNotOneOf(result_reg);
@ -2832,20 +2506,20 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent == r2);
const LowDwVfpRegister double_base = d0;
const LowDwVfpRegister double_exponent = d1;
const LowDwVfpRegister double_result = d2;
const LowDwVfpRegister double_scratch = d3;
const SwVfpRegister single_scratch = s6;
const Register scratch = r9;
const Register scratch2 = r4;
// Avoid using Registers r0-r3 as they may be needed when calling to C if the
// ABI is softfloat.
const Register integer_exponent = r4;
const Register scratch = r5;
Label call_runtime, done, int_exponent;
// Detect integer exponents stored as double.
__ TryDoubleToInt32Exact(scratch, double_exponent, double_scratch);
__ TryDoubleToInt32Exact(integer_exponent, double_exponent, double_scratch);
__ b(eq, &int_exponent);
__ push(lr);
@ -2862,16 +2536,13 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
// Calculate power with integer exponent.
__ bind(&int_exponent);
// Get two copies of exponent in the registers scratch and exponent.
// Exponent has previously been stored into scratch as untagged integer.
__ mov(exponent, scratch);
__ vmov(double_scratch, double_base); // Back up base.
__ vmov(double_result, Double(1.0), scratch2);
__ vmov(double_result, Double(1.0), scratch);
// Get absolute value of exponent.
__ cmp(scratch, Operand::Zero());
__ rsb(scratch, scratch, Operand::Zero(), LeaveCC, mi);
__ cmp(integer_exponent, Operand::Zero());
__ mov(scratch, integer_exponent);
__ rsb(scratch, integer_exponent, Operand::Zero(), LeaveCC, mi);
Label while_true;
__ bind(&while_true);
@ -2880,7 +2551,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ vmul(double_scratch, double_scratch, double_scratch, ne);
__ b(ne, &while_true);
__ cmp(exponent, Operand::Zero());
__ cmp(integer_exponent, Operand::Zero());
__ b(ge, &done);
__ vmov(double_scratch, Double(1.0), scratch);
__ vdiv(double_result, double_scratch, double_result);
@ -2890,7 +2561,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ b(ne, &done);
// double_exponent may not contain the exponent value if the input was a
// smi. We set it with the exponent value before bailing out.
__ vmov(single_scratch, exponent);
__ vmov(single_scratch, integer_exponent);
__ vcvt_f64_s32(double_exponent, single_scratch);
// Returning or bailing out.
@ -2908,6 +2579,88 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
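The integer fast path above is exponentiation by squaring: square the backed-up base once per exponent bit, multiply it into the result whenever the bit is set, and take the reciprocal at the end for a negative exponent. A self-contained C++ sketch of that loop; corner cases such as underflow to zero or an exponent that does not fit in an int32, which the assembly handles by bailing out to the runtime, are ignored here.

#include <cstdio>
#include <cstdlib>

double PowBySquaring(double base, int exponent) {
  double scratch = base;  // backed-up base (double_scratch above)
  double result = 1.0;    // double_result starts at 1.0
  unsigned abs_exp = static_cast<unsigned>(std::abs(exponent));
  while (abs_exp != 0) {
    if (abs_exp & 1) result *= scratch;    // multiply in the current bit
    abs_exp >>= 1;
    if (abs_exp != 0) scratch *= scratch;  // square for the next bit
  }
  if (exponent < 0) result = 1.0 / result;  // the vdiv(1.0, result) path
  return result;
}

int main() {
  std::printf("%g %g %g\n", PowBySquaring(2.0, 10),  // 1024
              PowBySquaring(3.0, -2),                // 0.111111
              PowBySquaring(1.5, 0));                // 1
}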
namespace {
void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
ElementsKind kind) {
__ cmp(r0, Operand(1));
__ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
.code(),
RelocInfo::CODE_TARGET, lo);
Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
__ Jump(code, RelocInfo::CODE_TARGET, hi);
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array; look at the first argument.
__ ldr(r3, MemOperand(sp, 0));
__ cmp(r3, Operand::Zero());
__ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
masm->isolate(), GetHoleyElementsKind(kind))
.code(),
RelocInfo::CODE_TARGET, ne);
}
__ Jump(
CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
.code(),
RelocInfo::CODE_TARGET);
}
} // namespace
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : argc
// -- r1 : constructor
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// Figure out the right elements kind
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into r3. We only need the first byte,
// but the following bit field extraction takes care of that anyway.
__ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ DecodeField<Map::ElementsKindBits>(r3);
if (FLAG_debug_code) {
Label done;
__ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &done);
__ cmp(r3, Operand(HOLEY_ELEMENTS));
__ Assert(
eq,
AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
Label fast_elements_case;
__ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &fast_elements_case);
GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
}
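Taken together, the two functions above dispatch on the argument count and on the elements kind read from the constructor's initial map. A compact C++ sketch of that decision tree; the enum values and stub names are stand-ins for illustration only.

#include <cstdio>

enum class ElementsKind { PACKED_ELEMENTS, HOLEY_ELEMENTS };

const char* DispatchInternalArray(int argc, ElementsKind kind,
                                  int first_arg /* sp[0] when argc == 1 */) {
  if (argc < 1) return "InternalArrayNoArgumentConstructor";
  if (argc > 1) return "ArrayNArgumentsConstructor";
  // Exactly one argument: a non-zero length on a packed kind selects the
  // holey single-argument stub, since the new backing store contains holes.
  if (kind == ElementsKind::PACKED_ELEMENTS && first_arg != 0)
    return "InternalArraySingleArgumentConstructor<HOLEY_ELEMENTS>";
  return "InternalArraySingleArgumentConstructor<current kind>";
}

int main() {
  std::printf("%s\n", DispatchInternalArray(1, ElementsKind::PACKED_ELEMENTS, 8));
  std::printf("%s\n", DispatchInternalArray(0, ElementsKind::HOLEY_ELEMENTS, 0));
}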
#undef __
} // namespace internal

deps/v8/src/builtins/arm64/builtins-arm64.cc vendored
View File

@ -22,7 +22,7 @@ namespace internal {
void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
ExitFrameType exit_frame_type) {
__ Mov(x5, ExternalReference::Create(address));
__ Mov(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
if (exit_frame_type == BUILTIN_EXIT) {
__ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
RelocInfo::CODE_TARGET);
@ -33,57 +33,6 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address,
}
}
namespace {
void AdaptorWithExitFrameType(MacroAssembler* masm,
Builtins::ExitFrameType exit_frame_type) {
// ----------- S t a t e -------------
// -- x0 : number of arguments excluding receiver
// -- x1 : target
// -- x3 : new target
// -- x5 : entry point
// -- sp[0] : last argument
// -- ...
// -- sp[4 * (argc - 1)] : first argument
// -- sp[4 * argc] : receiver
// -----------------------------------
__ AssertFunction(x1);
// Make sure we operate in the context of the called function (for example
// ConstructStubs implemented in C++ will be run in the context of the caller
// instead of the callee, due to the way that [[Construct]] is defined for
// ordinary functions).
__ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
// CEntry expects x0 to contain the number of arguments including the
// receiver and the extra arguments.
__ Add(x0, x0, BuiltinExitFrameConstants::kNumExtraArgsWithReceiver);
// Insert extra arguments.
Register padding = x10;
__ LoadRoot(padding, Heap::kTheHoleValueRootIndex);
__ SmiTag(x11, x0);
__ Push(padding, x11, x1, x3);
// Jump to the C entry runtime stub directly here instead of using
// JumpToExternalReference. We have already loaded entry point to x5
// in Generate_adaptor.
__ Mov(x1, x5);
Handle<Code> code =
CodeFactory::CEntry(masm->isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
exit_frame_type == Builtins::BUILTIN_EXIT);
__ Jump(code, RelocInfo::CODE_TARGET);
}
} // namespace
void Builtins::Generate_AdaptorWithExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, EXIT);
}
void Builtins::Generate_AdaptorWithBuiltinExitFrame(MacroAssembler* masm) {
AdaptorWithExitFrameType(masm, BUILTIN_EXIT);
}
void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
@ -105,39 +54,8 @@ void Builtins::Generate_InternalArrayConstructor(MacroAssembler* masm) {
// Run the native code for the InternalArray function called as a normal
// function.
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
InternalArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void Builtins::Generate_ArrayConstructor(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : array function
// -- lr : return address
// -- sp[...]: constructor arguments
// -----------------------------------
ASM_LOCATION("Builtins::Generate_ArrayConstructor");
Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
if (FLAG_debug_code) {
// Initial map for the builtin Array functions should be maps.
__ Ldr(x10, FieldMemOperand(x1, JSFunction::kPrototypeOrInitialMapOffset));
__ Tst(x10, kSmiTagMask);
__ Assert(ne, AbortReason::kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(x10, x11, x12, MAP_TYPE);
__ Assert(eq, AbortReason::kUnexpectedInitialMapForArrayFunction);
}
// x2 is the AllocationSite - here undefined.
__ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
// If x3 (new target) is undefined, then this is the 'Call' case, so move
// x1 (the constructor) to x3.
__ Cmp(x3, x2);
__ CmovX(x3, x1, eq);
// Run the native code for the Array function called as a normal function.
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
__ Jump(BUILTIN_CODE(masm->isolate(), InternalArrayConstructorImpl),
RelocInfo::CODE_TARGET);
}
static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
@ -266,8 +184,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Restore smi-tagged arguments count from the frame. Use fp relative
// addressing to avoid the circular dependency between padding existence and
// argc parity.
__ Ldrsw(x1,
UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
@ -351,8 +268,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Restore constructor function and argument count.
__ Ldr(x1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
__ Ldrsw(x12,
UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(x12, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Copy arguments to the expression stack. The called function pops the
// receiver along with its arguments, so we need an extra receiver on the
@ -451,8 +367,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ Bind(&leave_frame);
// Restore smi-tagged arguments count from the frame.
__ Ldrsw(x1,
UntagSmiMemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(x1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
// Leave construct frame.
}
// Remove caller arguments from the stack and return.
@ -513,8 +428,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Get number of arguments for generator function.
__ Ldr(x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w10,
FieldMemOperand(x10, SharedFunctionInfo::kFormalParameterCountOffset));
__ Ldrh(w10, FieldMemOperand(
x10, SharedFunctionInfo::kFormalParameterCountOffset));
// Claim slots for arguments and receiver (rounded up to a multiple of two).
__ Add(x11, x10, 2);
@ -539,18 +454,21 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// -- sp[0 .. arg count - 1] : claimed for args
// -----------------------------------
// Push holes for arguments to generator function. Since the parser forced
// context allocation for any variables in generators, the actual argument
// values have already been copied into the context and these dummy values
// will never be used.
// Copy the function arguments from the generator object's register file.
__ Ldr(x5,
FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label loop, done;
__ Cbz(x10, &done);
__ LoadRoot(x11, Heap::kTheHoleValueRootIndex);
__ Mov(x12, 0);
__ Bind(&loop);
__ Sub(x10, x10, 1);
__ Add(x11, x5, Operand(x12, LSL, kPointerSizeLog2));
__ Ldr(x11, FieldMemOperand(x11, FixedArray::kHeaderSize));
__ Poke(x11, Operand(x10, LSL, kPointerSizeLog2));
__ Add(x12, x12, 1);
__ Cbnz(x10, &loop);
__ Bind(&done);
}
@ -571,8 +489,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
// Resume (Ignition/TurboFan) generator object.
{
__ Ldr(x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(w0, FieldMemOperand(
x0, SharedFunctionInfo::kFormalParameterCountOffset));
__ Ldrh(w0, FieldMemOperand(
x0, SharedFunctionInfo::kFormalParameterCountOffset));
// We abuse new.target both to indicate that this is a resume call and to
// pass in the generator object. In ordinary calls, new.target is always
// undefined because generator functions are non-constructable.
@ -983,10 +901,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(lr, fp, cp, closure);
__ Add(fp, sp, StandardFrameConstants::kFixedFrameSizeFromFp);
// Get the bytecode array from the function object (or from the DebugInfo if
// it is present) and load it into kInterpreterBytecodeArrayRegister.
Label maybe_load_debug_bytecode_array, bytecode_array_loaded,
has_bytecode_array;
// Get the bytecode array from the function object and load it into
// kInterpreterBytecodeArrayRegister.
Label has_bytecode_array;
__ Ldr(x0, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
@ -997,9 +914,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FieldMemOperand(kInterpreterBytecodeArrayRegister,
InterpreterData::kBytecodeArrayOffset));
__ Bind(&has_bytecode_array);
__ Ldr(x11, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
__ JumpIfNotSmi(x11, &maybe_load_debug_bytecode_array);
__ Bind(&bytecode_array_loaded);
// Increment invocation count for the function.
__ Ldr(x11, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
@ -1112,31 +1026,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// The return value is in x0.
LeaveInterpreterFrame(masm, x2);
__ Ret();
// Load debug copy of the bytecode array if it exists.
// kInterpreterBytecodeArrayRegister is already loaded with
// SharedFunctionInfo::kFunctionDataOffset.
__ Bind(&maybe_load_debug_bytecode_array);
__ Ldr(x10, FieldMemOperand(x11, DebugInfo::kDebugBytecodeArrayOffset));
__ JumpIfRoot(x10, Heap::kUndefinedValueRootIndex, &bytecode_array_loaded);
__ Mov(kInterpreterBytecodeArrayRegister, x10);
__ Ldr(x10, UntagSmiFieldMemOperand(x11, DebugInfo::kFlagsOffset));
__ And(x10, x10, Immediate(DebugInfo::kDebugExecutionMode));
STATIC_ASSERT(static_cast<int>(DebugInfo::kDebugExecutionMode) ==
static_cast<int>(DebugInfo::kSideEffects));
ExternalReference debug_execution_mode =
ExternalReference::debug_execution_mode_address(masm->isolate());
__ Mov(x11, Operand(debug_execution_mode));
__ Ldrsb(x11, MemOperand(x11));
__ CompareAndBranch(x10, x11, eq, &bytecode_array_loaded);
__ Push(closure, feedback_vector);
__ PushArgument(closure);
__ CallRuntime(Runtime::kDebugApplyInstrumentation);
__ Pop(feedback_vector, closure);
__ jmp(&bytecode_array_loaded);
}
static void Generate_InterpreterPushArgs(MacroAssembler* masm,
@ -1274,8 +1163,8 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// Tail call to the array construct stub (still in the caller
// context at this point).
ArrayConstructorStub array_constructor_stub(masm->isolate());
__ Jump(array_constructor_stub.GetCode(), RelocInfo::CODE_TARGET);
Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl);
__ Jump(code, RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with x0, x1, and x3 unmodified.
__ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
@ -1383,209 +1272,6 @@ void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
Generate_InterpreterEnterBytecode(masm);
}
void Builtins::Generate_CompileLazyDeoptimizedCode(MacroAssembler* masm) {
// Set the code slot inside the JSFunction to CompileLazy.
__ Move(x2, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ Str(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ RecordWriteField(x1, JSFunction::kCodeOffset, x2, x5, kLRHasNotBeenSaved,
kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// Jump to compile lazy.
Generate_CompileLazy(masm);
}
static void GetSharedFunctionInfoCode(MacroAssembler* masm, Register sfi_data,
Register scratch1) {
// Figure out the SFI's code object.
Label done;
Label check_is_bytecode_array;
Label check_is_exported_function_data;
Label check_is_fixed_array;
Label check_is_pre_parsed_scope_data;
Label check_is_function_template_info;
Label check_is_interpreter_data;
Register data_type = scratch1;
// IsSmi: Is builtin
__ JumpIfNotSmi(sfi_data, &check_is_bytecode_array);
__ Mov(scratch1, ExternalReference::builtins_address(masm->isolate()));
__ Mov(sfi_data, Operand::UntagSmiAndScale(sfi_data, kPointerSizeLog2));
__ Ldr(sfi_data, MemOperand(scratch1, sfi_data));
__ B(&done);
// Get map for subsequent checks.
__ Bind(&check_is_bytecode_array);
__ Ldr(data_type, FieldMemOperand(sfi_data, HeapObject::kMapOffset));
__ Ldrh(data_type, FieldMemOperand(data_type, Map::kInstanceTypeOffset));
// IsBytecodeArray: Interpret bytecode
__ Cmp(data_type, Operand(BYTECODE_ARRAY_TYPE));
__ B(ne, &check_is_exported_function_data);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InterpreterEntryTrampoline));
__ B(&done);
// IsWasmExportedFunctionData: Use the wrapper code
__ Bind(&check_is_exported_function_data);
__ Cmp(data_type, Operand(WASM_EXPORTED_FUNCTION_DATA_TYPE));
__ B(ne, &check_is_fixed_array);
__ Ldr(sfi_data, FieldMemOperand(
sfi_data, WasmExportedFunctionData::kWrapperCodeOffset));
__ B(&done);
// IsFixedArray: Instantiate using AsmWasmData
__ Bind(&check_is_fixed_array);
__ Cmp(data_type, Operand(FIXED_ARRAY_TYPE));
__ B(ne, &check_is_pre_parsed_scope_data);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), InstantiateAsmJs));
__ B(&done);
// IsPreParsedScopeData: Compile lazy
__ Bind(&check_is_pre_parsed_scope_data);
__ Cmp(data_type, Operand(TUPLE2_TYPE));
__ B(ne, &check_is_function_template_info);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), CompileLazy));
__ B(&done);
// IsFunctionTemplateInfo: API call
__ Bind(&check_is_function_template_info);
__ Cmp(data_type, Operand(FUNCTION_TEMPLATE_INFO_TYPE));
__ B(ne, &check_is_interpreter_data);
__ Move(sfi_data, BUILTIN_CODE(masm->isolate(), HandleApiCall));
__ B(&done);
// IsInterpreterData: Interpret bytecode
__ Bind(&check_is_interpreter_data);
if (FLAG_debug_code) {
__ Cmp(data_type, Operand(INTERPRETER_DATA_TYPE));
__ Assert(eq, AbortReason::kInvalidSharedFunctionInfoData);
}
__ Ldr(
sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kInterpreterTrampolineOffset));
__ Bind(&done);
}
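GetSharedFunctionInfoCode is effectively a type-directed lookup table: each kind of SharedFunctionInfo::function_data selects the code object to run. A sketch of that mapping; the enum is a hypothetical stand-in, since the real code dispatches on Smi-ness and instance types.

#include <cstdio>

enum class FunctionDataKind {
  kSmiBuiltinId,              // Smi: index into the builtins table
  kBytecodeArray,             // interpreted function
  kWasmExportedFunctionData,  // wasm export wrapper
  kFixedArray,                // asm.js data, instantiate lazily
  kPreParsedScopeData,        // Tuple2: not compiled yet
  kFunctionTemplateInfo,      // API call
  kInterpreterData            // bytecode plus a custom interpreter trampoline
};

const char* CodeForFunctionData(FunctionDataKind kind) {
  switch (kind) {
    case FunctionDataKind::kSmiBuiltinId:             return "builtins_table[id]";
    case FunctionDataKind::kBytecodeArray:            return "InterpreterEntryTrampoline";
    case FunctionDataKind::kWasmExportedFunctionData: return "wrapper code";
    case FunctionDataKind::kFixedArray:               return "InstantiateAsmJs";
    case FunctionDataKind::kPreParsedScopeData:       return "CompileLazy";
    case FunctionDataKind::kFunctionTemplateInfo:     return "HandleApiCall";
    case FunctionDataKind::kInterpreterData:          return "trampoline from InterpreterData";
  }
  return "unreachable";
}

int main() {
  std::printf("%s\n", CodeForFunctionData(FunctionDataKind::kBytecodeArray));
}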
void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
// -- x3 : new target (preserved for callee)
// -- x1 : target function (preserved for callee)
// -----------------------------------
// First lookup code, maybe we don't need to compile!
Label gotta_call_runtime;
Register closure = x1;
Register feedback_vector = x2;
// Do we have a valid feedback vector?
__ Ldr(feedback_vector,
FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
__ Ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
__ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime);
// Is there an optimization marker or optimized code in the feedback vector?
MaybeTailCallOptimizedCodeSlot(masm, feedback_vector, x7, x4, x5);
// We found no optimized code. Infer the code object needed for the SFI.
Register entry = x7;
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(entry,
FieldMemOperand(entry, SharedFunctionInfo::kFunctionDataOffset));
GetSharedFunctionInfoCode(masm, entry, x5);
// If code entry points to anything other than CompileLazy, install that.
__ Move(x5, masm->CodeObject());
__ Cmp(entry, x5);
__ B(eq, &gotta_call_runtime);
// Install the SFI's code entry.
__ Str(entry, FieldMemOperand(closure, JSFunction::kCodeOffset));
__ Mov(x10, entry); // Write barrier clobbers x10 below.
__ RecordWriteField(closure, JSFunction::kCodeOffset, x10, x5,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(entry);
__ Bind(&gotta_call_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
}
// Lazy deserialization design doc: http://goo.gl/dxkYDZ.
void Builtins::Generate_DeserializeLazy(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
// -- x3 : new target (preserved for callee)
// -- x1 : target function (preserved for callee)
// -----------------------------------
Label deserialize_in_runtime;
Register target = x1; // Must be preserved
Register scratch0 = x2;
Register scratch1 = x4;
CHECK(!scratch0.is(x0) && !scratch0.is(x3) && !scratch0.is(x1));
CHECK(!scratch1.is(x0) && !scratch1.is(x3) && !scratch1.is(x1));
CHECK(!scratch0.is(scratch1));
// Load the builtin id for lazy deserialization from SharedFunctionInfo.
__ AssertFunction(target);
__ Ldr(scratch0,
FieldMemOperand(target, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(scratch1,
FieldMemOperand(scratch0, SharedFunctionInfo::kFunctionDataOffset));
__ AssertSmi(scratch1);
// The builtin may already have been deserialized. If that is the case, it is
// stored in the builtins table, and we can copy the correct code object to
// both the shared function info and function without calling into runtime.
//
// Otherwise, we need to call into runtime to deserialize.
{
// Load the code object at builtins_table[builtin_id] into scratch1.
__ SmiUntag(scratch1);
__ Mov(scratch0, ExternalReference::builtins_address(masm->isolate()));
__ Ldr(scratch1, MemOperand(scratch0, scratch1, LSL, kPointerSizeLog2));
// Check if the loaded code object has already been deserialized. This is
// the case iff it does not equal DeserializeLazy.
__ Move(scratch0, masm->CodeObject());
__ Cmp(scratch1, scratch0);
__ B(eq, &deserialize_in_runtime);
}
{
// If we've reached this spot, the target builtin has been deserialized and
// we simply need to copy it over to the target function.
Register target_builtin = scratch1;
__ Str(target_builtin, FieldMemOperand(target, JSFunction::kCodeOffset));
__ Mov(x9, target_builtin); // Write barrier clobbers x9 below.
__ RecordWriteField(target, JSFunction::kCodeOffset, x9, x5,
kLRHasNotBeenSaved, kDontSaveFPRegs,
OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
// All copying is done. Jump to the deserialized code object.
__ Add(target_builtin, target_builtin,
Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(target_builtin);
}
__ bind(&deserialize_in_runtime);
GenerateTailCallToReturnedCode(masm, Runtime::kDeserializeLazy);
}
void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argument count (preserved for callee)
@ -1656,7 +1342,7 @@ void Builtins::Generate_InstantiateAsmJs(MacroAssembler* masm) {
__ JumpIfSmi(x0, &failed);
// Peek the argument count from the stack, untagging at the same time.
__ Ldr(w4, UntagSmiMemOperand(sp, 3 * kPointerSize));
__ SmiUntag(x4, MemOperand(sp, 3 * kPointerSize));
__ Drop(4);
scope.GenerateLeaveFrame();
@ -1800,9 +1486,9 @@ static void Generate_OnStackReplacementHelper(MacroAssembler* masm,
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
__ Ldrsw(w1, UntagSmiFieldMemOperand(
x1, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
__ SmiUntag(x1,
FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
DeoptimizationData::kOsrPcOffsetIndex)));
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
@ -2222,7 +1908,24 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// -- x4 : len (number of elements to push from args)
// -- x3 : new.target (for [[Construct]])
// -----------------------------------
__ AssertFixedArray(x2);
if (masm->emit_debug_code()) {
// Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
Label ok, fail;
__ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
__ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
__ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
__ Cmp(x13, FIXED_ARRAY_TYPE);
__ B(eq, &ok);
__ Cmp(x13, FIXED_DOUBLE_ARRAY_TYPE);
__ B(ne, &fail);
__ Cmp(x4, 0);
__ B(eq, &ok);
// Fall through.
__ bind(&fail);
__ Abort(AbortReason::kOperandIsNotAFixedArray);
__ bind(&ok);
}
Register arguments_list = x2;
Register argc = x0;
@ -2328,18 +2031,18 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(scratch,
FieldMemOperand(scratch, JSFunction::kSharedFunctionInfoOffset));
__ Ldrsw(len,
FieldMemOperand(
scratch, SharedFunctionInfo::kFormalParameterCountOffset));
__ Ldrh(len,
FieldMemOperand(scratch,
SharedFunctionInfo::kFormalParameterCountOffset));
__ Mov(args_fp, fp);
}
__ B(&arguments_done);
__ Bind(&arguments_adaptor);
{
// Just load the length from ArgumentsAdaptorFrame.
__ Ldrsw(len,
UntagSmiMemOperand(
args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(
len,
MemOperand(args_fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ Bind(&arguments_done);
}
@ -2455,8 +2158,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
// -- cp : the function context.
// -----------------------------------
__ Ldrsw(
x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
__ Ldrh(x2,
FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset));
ParameterCount actual(x0);
ParameterCount expected(x2);
__ InvokeFunctionCode(x1, no_reg, expected, actual, JUMP_FUNCTION);
@ -2486,8 +2189,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Label no_bound_arguments;
__ Ldr(bound_argv,
FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
__ Ldrsw(bound_argc,
UntagSmiFieldMemOperand(bound_argv, FixedArray::kLengthOffset));
__ SmiUntag(bound_argc,
FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
__ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
@ -2774,46 +2477,6 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
// ----------- S t a t e -------------
// -- x1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
__ SmiTag(x1);
__ PushArgument(x1);
__ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInOldSpace");
// ----------- S t a t e -------------
// -- x1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
__ SmiTag(x1);
__ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(x1, x2);
__ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
// static
void Builtins::Generate_Abort(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_Abort");
// ----------- S t a t e -------------
// -- x1 : message_id as Smi
// -- lr : return address
// -----------------------------------
MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
__ PushArgument(x1);
__ Move(cp, Smi::kZero);
__ TailCallRuntime(Runtime::kAbort);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
// ----------- S t a t e -------------
@ -3007,32 +2670,38 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
}
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in w8 by the jump table trampoline.
// Sign extend and convert to Smi for the runtime call.
__ sxtw(x8, w8);
__ SmiTag(x8, x8);
{
FrameScope scope(masm, StackFrame::INTERNAL);
TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
// Save all parameter registers (see wasm-linkage.cc). They might be
// overwritten in the runtime call below. We don't have any callee-saved
// registers in wasm, so no need to store anything else.
constexpr RegList gp_regs = Register::ListOf<x0, x1, x2, x3, x4, x5>();
constexpr RegList gp_regs =
Register::ListOf<x0, x1, x2, x3, x4, x5, x6, x7>();
constexpr RegList fp_regs =
Register::ListOf<d0, d1, d2, d3, d4, d5, d6, d7>();
__ PushXRegList(gp_regs);
__ PushDRegList(fp_regs);
__ Push(x5, x6); // note: pushed twice because alignment required
// Pass the WASM instance as an explicit argument to WasmCompileLazy.
__ PushArgument(kWasmInstanceRegister);
// Pass instance and function index as explicit arguments to the runtime
// function.
__ Push(kWasmInstanceRegister, x8);
// Load the correct CEntry builtin from the instance object.
__ Ldr(x2, FieldMemOperand(kWasmInstanceRegister,
WasmInstanceObject::kCEntryStubOffset));
// Initialize the JavaScript context with 0. CEntry will use it to
// set the current context on the isolate.
__ Move(cp, Smi::kZero);
__ CallRuntime(Runtime::kWasmCompileLazy);
// The entrypoint address is the first return value.
__ CallRuntimeWithCEntry(Runtime::kWasmCompileLazy, x2);
// The entrypoint address is the return value.
__ mov(x8, kReturnRegister0);
// The WASM instance is the second return value.
__ mov(kWasmInstanceRegister, kReturnRegister1);
// Restore registers.
__ Pop(x6, x5); // note: pushed twice because alignment required
__ PopDRegList(fp_regs);
__ PopXRegList(gp_regs);
}
@ -3268,6 +2937,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
DCHECK(result.Is64Bits());
TrapOnAbortScope trap_on_abort_scope(masm); // Avoid calls to Abort.
UseScratchRegisterScope temps(masm);
Register scratch1 = temps.AcquireX();
Register scratch2 = temps.AcquireX();
@ -3329,16 +2999,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
}
void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
// Stack on entry:
// sp[0]: Exponent (as a tagged value).
// sp[1]: Base (as a tagged value).
//
// The (tagged) result will be returned in x0, as a heap number.
Register exponent_tagged = MathPowTaggedDescriptor::exponent();
DCHECK(exponent_tagged.is(x11));
Register exponent_integer = MathPowIntegerDescriptor::exponent();
DCHECK(exponent_integer.is(x12));
Register exponent_integer = x12;
Register saved_lr = x19;
VRegister result_double = d0;
VRegister base_double = d0;
@ -3348,7 +3009,7 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
VRegister scratch0_double = d7;
// A fast-path for integer exponents.
Label exponent_is_smi, exponent_is_integer;
Label exponent_is_integer;
// Allocate a heap number for the result, and return it.
Label done;
@ -3368,24 +3029,12 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ B(&done);
}
// Handle SMI exponents.
__ Bind(&exponent_is_smi);
// x10 base_tagged The tagged base (input).
// x11 exponent_tagged The tagged exponent (input).
// d1 base_double The base as a double.
__ SmiUntag(exponent_integer, exponent_tagged);
__ Bind(&exponent_is_integer);
// x10 base_tagged The tagged base (input).
// x11 exponent_tagged The tagged exponent (input).
// x12 exponent_integer The exponent as an integer.
// d1 base_double The base as a double.
// Find abs(exponent). For negative exponents, we can find the inverse later.
Register exponent_abs = x13;
__ Cmp(exponent_integer, 0);
__ Cneg(exponent_abs, exponent_integer, mi);
// x13 exponent_abs The value of abs(exponent_integer).
// Repeatedly multiply to calculate the power.
// result = 1.0;
@ -3441,6 +3090,102 @@ void Builtins::Generate_MathPowInternal(MacroAssembler* masm) {
__ Ret();
}
namespace {
void GenerateInternalArrayConstructorCase(MacroAssembler* masm,
ElementsKind kind) {
Label zero_case, n_case;
Register argc = x0;
__ Cbz(argc, &zero_case);
__ CompareAndBranch(argc, 1, ne, &n_case);
// One argument.
if (IsFastPackedElementsKind(kind)) {
Label packed_case;
// We might need to create a holey array; look at the first argument.
__ Peek(x10, 0);
__ Cbz(x10, &packed_case);
__ Jump(CodeFactory::InternalArraySingleArgumentConstructor(
masm->isolate(), GetHoleyElementsKind(kind))
.code(),
RelocInfo::CODE_TARGET);
__ Bind(&packed_case);
}
__ Jump(
CodeFactory::InternalArraySingleArgumentConstructor(masm->isolate(), kind)
.code(),
RelocInfo::CODE_TARGET);
__ Bind(&zero_case);
// No arguments.
__ Jump(CodeFactory::InternalArrayNoArgumentConstructor(masm->isolate(), kind)
.code(),
RelocInfo::CODE_TARGET);
__ Bind(&n_case);
// N arguments.
Handle<Code> code = BUILTIN_CODE(masm->isolate(), ArrayNArgumentsConstructor);
__ Jump(code, RelocInfo::CODE_TARGET);
}
} // namespace
void Builtins::Generate_InternalArrayConstructorImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : argc
// -- x1 : constructor
// -- sp[0] : return address
// -- sp[4] : last argument
// -----------------------------------
Register constructor = x1;
if (FLAG_debug_code) {
// The array construct code is only set for the global and natives
// builtin Array functions which always have maps.
Label unexpected_map, map_ok;
// Initial map for the builtin Array function should be a map.
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
__ Abort(AbortReason::kUnexpectedInitialMapForArrayFunction);
__ Bind(&map_ok);
}
Register kind = w3;
// Figure out the right elements kind
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
// Retrieve elements_kind from map.
__ LoadElementsKindFromMap(kind, x10);
if (FLAG_debug_code) {
Label done;
__ Cmp(x3, PACKED_ELEMENTS);
__ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
__ Assert(
eq,
AbortReason::kInvalidElementsKindForInternalArrayOrInternalPackedArray);
}
Label fast_elements_case;
__ CompareAndBranch(kind, PACKED_ELEMENTS, eq, &fast_elements_case);
GenerateInternalArrayConstructorCase(masm, HOLEY_ELEMENTS);
__ Bind(&fast_elements_case);
GenerateInternalArrayConstructorCase(masm, PACKED_ELEMENTS);
}
#undef __
} // namespace internal

173
deps/v8/src/builtins/array-foreach.tq vendored Normal file
View File

@ -0,0 +1,173 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
module array {
macro ArrayForEachTorqueContinuation(
context: Context, o: Object, len: Number, callbackfn: Callable,
thisArg: Object, initial_k: Smi): Object {
// 5. Let k be 0.
// 6. Repeat, while k < len
for (let k: Smi = initial_k; k < len; k = k + 1) {
// 6a. Let Pk be ! ToString(k).
let pK: String = ToString_Inline(context, k);
// 6b. Let kPresent be ? HasProperty(O, Pk).
let kPresent: Oddball = HasPropertyObject(o, pK, context, kHasProperty);
// 6c. If kPresent is true, then
if (kPresent == True) {
// 6c. i. Let kValue be ? Get(O, Pk).
let kValue: Object = GetProperty(context, o, pK);
// 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
Call(context, callbackfn, thisArg, kValue, k, o);
}
// 6d. Increase k by 1. (done by the loop).
}
return Undefined;
}
javascript builtin ArrayForEachLoopEagerDeoptContinuation(
context: Context, receiver: Object, callback: Object, thisArg: Object,
initialK: Object, length: Object): Object {
return ArrayForEachLoopContinuation(
context, receiver, callback, thisArg, Undefined, receiver, initialK,
length, Undefined);
}
javascript builtin ArrayForEachLoopLazyDeoptContinuation(
context: Context, receiver: Object, callback: Object, thisArg: Object,
initialK: Object, length: Object, result: Object): Object {
return ArrayForEachLoopContinuation(
context, receiver, callback, thisArg, Undefined, receiver, initialK,
length, Undefined);
}
builtin ArrayForEachLoopContinuation(
context: Context, receiver: Object, callback: Object, thisArg: Object,
array: Object, object: Object, initialK: Object, length: Object,
to: Object): Object {
try {
let callbackfn: Callable = cast<Callable>(callback) otherwise Unexpected;
let k: Smi = cast<Smi>(initialK) otherwise Unexpected;
let number_length: Number = cast<Number>(length) otherwise Unexpected;
return ArrayForEachTorqueContinuation(
context, object, number_length, callbackfn, thisArg, k);
}
label Unexpected {
unreachable;
}
}
macro VisitAllElements<FixedArrayType : type>(
context: Context, a: JSArray, len: Smi, callbackfn: Callable,
thisArg: Object): void labels
Bailout(Smi) {
let k: Smi = 0;
let map: Map = a.map;
try {
// Build a fast loop over the smi array.
for (; k < len; k = k + 1) {
// Ensure that the map didn't change.
if (map != a.map) goto Slow;
// Ensure that we haven't walked beyond a possibly updated length.
if (k >= a.length) goto Slow;
try {
let value: Object =
LoadElementNoHole<FixedArrayType>(a, k) otherwise FoundHole;
Call(context, callbackfn, thisArg, value, k, a);
}
label FoundHole {
// If we found the hole, we need to bail out if the initial
// array prototype has had elements inserted. This is preferable
// to walking the prototype chain looking for elements.
if (IsNoElementsProtectorCellInvalid()) goto Bailout(k);
}
}
}
label Slow {
goto Bailout(k);
}
}
macro FastArrayForEach(
context: Context, o: Object, len: Number, callbackfn: Callable,
thisArg: Object): Object labels
Bailout(Smi) {
let k: Smi = 0;
try {
let smi_len: Smi = cast<Smi>(len) otherwise Slow;
let a: JSArray = cast<JSArray>(o) otherwise Slow;
let map: Map = a.map;
if (!IsPrototypeInitialArrayPrototype(context, map)) goto Slow;
let elementsKind: ElementsKind = map.elements_kind;
if (!IsFastElementsKind(elementsKind)) goto Slow;
if (IsElementsKindGreaterThan(elementsKind, HOLEY_ELEMENTS)) {
VisitAllElements<FixedDoubleArray>(
context, a, smi_len, callbackfn, thisArg)
otherwise Bailout;
} else {
VisitAllElements<FixedArray>(context, a, smi_len, callbackfn, thisArg)
otherwise Bailout;
}
}
label Slow {
goto Bailout(k);
}
return Undefined;
}
// https://tc39.github.io/ecma262/#sec-array.prototype.foreach
javascript builtin ArrayForEach(
context: Context, receiver: Object, ...arguments): Object {
try {
if (IsNullOrUndefined(receiver)) {
goto NullOrUndefinedError;
}
// 1. Let O be ? ToObject(this value).
let o: Object = ToObject(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
let len: Number = GetLengthProperty(context, o);
// 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
if (arguments.length == 0) {
goto TypeError;
}
let callbackfn: Callable =
cast<Callable>(arguments[0]) otherwise TypeError;
// 4. If thisArg is present, let T be thisArg; else let T be undefined.
let thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
// Special cases.
let k: Smi = 0;
try {
return FastArrayForEach(context, o, len, callbackfn, thisArg)
otherwise Bailout;
}
label Bailout(k_value: Smi) {
k = k_value;
}
return ArrayForEachTorqueContinuation(
context, o, len, callbackfn, thisArg, k);
}
label TypeError {
ThrowTypeError(context, kCalledNonCallable, arguments[0]);
}
label NullOrUndefinedError {
ThrowTypeError(
context, kCalledOnNullOrUndefined, 'Array.prototype.forEach');
}
}
}
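Structurally, ArrayForEach above is a fast loop over the backing store that bails out at index k to a generic, spec-style continuation whenever the storage can no longer be trusted (the map changed, the length shrank, or a hole is hit while the no-elements protector is invalidated). A minimal C++ model of that fast-path-plus-continuation shape, using std::optional to stand in for the hole; all names are illustrative, not V8's, and the map and length checks are omitted.

#include <cstddef>
#include <cstdio>
#include <functional>
#include <optional>
#include <vector>

using Element = std::optional<int>;  // nullopt stands in for "the hole"

void ForEachSketch(std::vector<Element>& a, bool protector_invalidated,
                   const std::function<void(int, std::size_t)>& callback) {
  std::size_t k = 0;
  // Fast path: holes are skipped while the no-elements protector is intact,
  // otherwise we bail out at k (models goto Bailout(k) above).
  for (; k < a.size(); ++k) {
    if (!a[k].has_value()) {
      if (protector_invalidated) break;
      continue;  // hole, but the prototype chain cannot supply elements
    }
    callback(*a[k], k);
  }
  // Generic continuation from index k: HasProperty, then Get, then Call.
  for (; k < a.size(); ++k) {
    if (a[k].has_value()) callback(*a[k], k);
  }
}

int main() {
  std::vector<Element> a{1, 2, std::nullopt, 4};
  ForEachSketch(a, /*protector_invalidated=*/true,
                [](int v, std::size_t i) { std::printf("a[%zu] = %d\n", i, v); });
}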

8
deps/v8/src/builtins/array-sort.tq vendored Normal file
View File

@ -0,0 +1,8 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
module array {
// TODO(szuend): TimSort implementation will go here. Keeping the file around
// after removing the QuickSort Torque implementation.
}

deps/v8/src/builtins/array.tq vendored
View File

@ -3,8 +3,18 @@
// found in the LICENSE file.
module array {
macro GetLengthProperty(context: Context, o: Object): Number {
if (BranchIfFastJSArray(o, context)) {
let a: JSArray = unsafe_cast<JSArray>(o);
return a.length_fast;
} else
deferred {
return ToLength_Inline(context, GetProperty(context, o, 'length'));
}
}
macro FastArraySplice(
context: Context, args: Arguments, o: Object,
context: Context, args: constexpr Arguments, o: Object,
originalLengthNumber: Number, actualStartNumber: Number, insertCount: Smi,
actualDeleteCountNumber: Number): Object
labels Bailout {
@ -103,8 +113,7 @@ module array {
let o: Object = ToObject(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
let len: Number =
ToLength_Inline(context, GetProperty(context, o, 'length'));
let len: Number = GetLengthProperty(context, o);
// 3. Let relativeStart be ? ToInteger(start).
let start: Object = arguments[0];
@ -145,7 +154,7 @@ module array {
// 8. If len + insertCount - actualDeleteCount > 2^53-1, throw a
// Bailout exception.
if (len + insertCount - actualDeleteCount > kMaxSafeInteger) {
ThrowRangeError(context, kInvalidArrayLengthMessage);
ThrowRangeError(context, kInvalidArrayLength);
}
try {
@ -186,7 +195,7 @@ module array {
}
// 12. Perform ? Set(A, "length", actualDeleteCount, true).
SetProperty(context, a, 'length', actualDeleteCount, strict);
SetProperty(context, a, 'length', actualDeleteCount, kStrict);
// 13. Let items be a List whose elements are, in left-to-right order,
// the portion of the actual argument list starting with the third
@ -217,12 +226,12 @@ module array {
let fromValue: Object = GetProperty(context, o, from);
// 2. Perform ? Set(O, to, fromValue, true).
SetProperty(context, o, to, fromValue, strict);
SetProperty(context, o, to, fromValue, kStrict);
// v. Else fromPresent is false,
} else {
// 1. Perform ? DeletePropertyOrThrow(O, to).
DeleteProperty(context, o, to, strict);
DeleteProperty(context, o, to, kStrict);
}
// vi. Increase k by 1.
k = k + 1;
@ -233,7 +242,7 @@ module array {
// d. Repeat, while k > (len - actualDeleteCount + itemCount)
while (k > (len - actualDeleteCount + itemCount)) {
// i. Perform ? DeletePropertyOrThrow(O, ! ToString(k - 1)).
DeleteProperty(context, o, ToString_Inline(context, k - 1), strict);
DeleteProperty(context, o, ToString_Inline(context, k - 1), kStrict);
// ii. Decrease k by 1.
k = k - 1;
@ -261,12 +270,12 @@ module array {
let fromValue: Object = GetProperty(context, o, from);
// 2. Perform ? Set(O, to, fromValue, true).
SetProperty(context, o, to, fromValue, strict);
SetProperty(context, o, to, fromValue, kStrict);
// v. Else fromPresent is false,
} else {
// 1. Perform ? DeletePropertyOrThrow(O, to).
DeleteProperty(context, o, to, strict);
DeleteProperty(context, o, to, kStrict);
}
// vi. Decrease k by 1.
@ -283,7 +292,7 @@ module array {
if (arguments.length > 2) {
for (let e: Object of arguments [2: ]) {
// b. Perform ? Set(O, ! ToString(k), E, true).
SetProperty(context, o, ToString_Inline(context, k), e, strict);
SetProperty(context, o, ToString_Inline(context, k), e, kStrict);
// c. Increase k by 1.
k = k + 1;
@ -293,177 +302,8 @@ module array {
// 19. Perform ? Set(O, "length", len - actualDeleteCount + itemCount,
// true).
SetProperty(
context, o, 'length', len - actualDeleteCount + itemCount, strict);
context, o, 'length', len - actualDeleteCount + itemCount, kStrict);
return a;
}
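The two while loops above are the element-shifting steps of the generic splice path: shift the tail left when fewer items are inserted than deleted, or grow and shift it right when more are inserted, then write the new items. A short C++ sketch of that shuffling, under the assumption that a std::vector stands in for the receiver; the real code goes through Get, Set, and DeletePropertyOrThrow on an arbitrary object.

#include <cstddef>
#include <cstdio>
#include <vector>

std::vector<int> SpliceSketch(std::vector<int>& o, std::size_t start,
                              std::size_t delete_count,
                              const std::vector<int>& items) {
  std::vector<int> removed(o.begin() + start, o.begin() + start + delete_count);
  std::size_t len = o.size();
  if (items.size() < delete_count) {
    // Shift the tail left (the "while k < len - actualDeleteCount" loop).
    for (std::size_t k = start; k < len - delete_count; ++k)
      o[k + items.size()] = o[k + delete_count];
    o.resize(len - delete_count + items.size());
  } else if (items.size() > delete_count) {
    // Grow, then shift the tail right (the downward-counting loop).
    o.resize(len - delete_count + items.size());
    for (std::size_t k = len - delete_count; k > start; --k)
      o[k + items.size() - 1] = o[k + delete_count - 1];
  }
  for (std::size_t i = 0; i < items.size(); ++i) o[start + i] = items[i];
  return removed;
}

int main() {
  std::vector<int> a{1, 2, 3, 4, 5};
  std::vector<int> removed = SpliceSketch(a, 1, 2, {9});
  std::printf("size now %zu, removed %zu\n", a.size(), removed.size());  // 4, 2
}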
macro ArrayForEachTorqueContinuation(
context: Context, o: Object, len: Number, callbackfn: Callable,
thisArg: Object, initial_k: Smi): Object {
// 5. Let k be 0.
// 6. Repeat, while k < len
for (let k: Smi = initial_k; k < len; k = k + 1) {
// 6a. Let Pk be ! ToString(k).
let pK: String = ToString_Inline(context, k);
// 6b. Let kPresent be ? HasProperty(O, Pk).
let kPresent: Oddball = HasPropertyObject(o, pK, context, kHasProperty);
// 6c. If kPresent is true, then
if (kPresent == True) {
// 6c. i. Let kValue be ? Get(O, Pk).
let kValue: Object = GetProperty(context, o, pK);
// 6c. ii. Perform ? Call(callbackfn, T, <kValue, k, O>).
Call(context, callbackfn, thisArg, kValue, k, o);
}
// 6d. Increase k by 1. (done by the loop).
}
return Undefined;
}
javascript builtin ArrayForEachLoopEagerDeoptContinuation(
context: Context, receiver: Object, callback: Object, thisArg: Object,
initialK: Object, length: Object): Object {
return ArrayForEachLoopContinuation(
context, receiver, callback, thisArg, Undefined, receiver, initialK,
length, Undefined);
}
javascript builtin ArrayForEachLoopLazyDeoptContinuation(
context: Context, receiver: Object, callback: Object, thisArg: Object,
initialK: Object, length: Object, result: Object): Object {
return ArrayForEachLoopContinuation(
context, receiver, callback, thisArg, Undefined, receiver, initialK,
length, Undefined);
}
builtin ArrayForEachLoopContinuation(
context: Context, receiver: Object, callback: Object, thisArg: Object,
array: Object, object: Object, initialK: Object, length: Object,
to: Object): Object {
try {
let callbackfn: Callable = cast<Callable>(callback) otherwise Unexpected;
let k: Smi = cast<Smi>(initialK) otherwise Unexpected;
let number_length: Number = cast<Number>(length) otherwise Unexpected;
return ArrayForEachTorqueContinuation(
context, object, number_length, callbackfn, thisArg, k);
}
label Unexpected {
unreachable;
}
}
macro VisitAllElements<FixedArrayType : type>(
context: Context, a: JSArray, len: Smi, callbackfn: Callable,
thisArg: Object): void labels
Bailout(Smi) {
let k: Smi = 0;
let map: Map = a.map;
try {
// Build a fast loop over the smi array.
for (; k < len; k = k + 1) {
// Ensure that the map didn't change.
if (map != a.map) goto Slow;
// Ensure that we haven't walked beyond a possibly updated length.
if (k >= a.length) goto Slow;
try {
let value: Object =
LoadElementNoHole<FixedArrayType>(a, k) otherwise FoundHole;
Call(context, callbackfn, thisArg, value, k, a);
}
label FoundHole {
// If we found the hole, we need to bail out if the initial
// array prototype has had elements inserted. This is preferable
// to walking the prototype chain looking for elements.
if (IsNoElementsProtectorCellInvalid()) goto Bailout(k);
}
}
}
label Slow {
goto Bailout(k);
}
}
macro FastArrayForEach(
context: Context, o: Object, len: Number, callbackfn: Callable,
thisArg: Object): Object labels
Bailout(Smi) {
let k: Smi = 0;
try {
let smi_len: Smi = cast<Smi>(len) otherwise Slow;
let a: JSArray = cast<JSArray>(o) otherwise Slow;
let map: Map = a.map;
if (!IsPrototypeInitialArrayPrototype(context, map)) goto Slow;
let elementsKind: ElementsKind = map.elements_kind;
if (!IsFastElementsKind(elementsKind)) goto Slow;
if (IsElementsKindGreaterThan(elementsKind, HOLEY_ELEMENTS)) {
VisitAllElements<FixedDoubleArray>(
context, a, smi_len, callbackfn, thisArg)
otherwise Bailout;
} else {
VisitAllElements<FixedArray>(context, a, smi_len, callbackfn, thisArg)
otherwise Bailout;
}
}
label Slow {
goto Bailout(k);
}
return Undefined;
}
// https://tc39.github.io/ecma262/#sec-array.prototype.foreach
javascript builtin ArrayForEach(
context: Context, receiver: Object, ...arguments): Object {
try {
if (IsNullOrUndefined(receiver)) {
goto NullOrUndefinedError;
}
// 1. Let O be ? ToObject(this value).
let o: Object = ToObject(context, receiver);
// 2. Let len be ? ToLength(? Get(O, "length")).
let len: Number =
ToLength_Inline(context, GetProperty(context, o, 'length'));
// 3. If IsCallable(callbackfn) is false, throw a TypeError exception.
if (arguments.length == 0) {
goto TypeError;
}
let callbackfn: Callable =
cast<Callable>(arguments[0]) otherwise TypeError;
// 4. If thisArg is present, let T be thisArg; else let T be undefined.
let thisArg: Object = arguments.length > 1 ? arguments[1] : Undefined;
// Special cases.
let k: Smi = 0;
try {
return FastArrayForEach(context, o, len, callbackfn, thisArg)
otherwise Bailout;
}
label Bailout(k_value: Smi) {
k = k_value;
}
return ArrayForEachTorqueContinuation(
context, o, len, callbackfn, thisArg, k);
}
label TypeError {
ThrowTypeError(context, kCalledNonCallable, arguments[0]);
}
label NullOrUndefinedError {
ThrowTypeError(
context, kCalledOnNullOrUndefined, 'Array.prototype.forEach');
}
}
}

deps/v8/src/builtins/base.tq vendored
View File

@ -2,21 +2,26 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
type Arguments generates 'CodeStubArguments*';
type Arguments constexpr 'CodeStubArguments*';
type void generates 'void';
type never generates 'void';
type Object generates 'TNode<Object>';
type Tagged generates 'TNode<Object>';
type Smi extends Tagged generates 'TNode<Smi>';
type HeapObject extends Tagged generates 'TNode<HeapObject>';
type Object = Smi|HeapObject;
type int32 generates 'TNode<Int32T>' constexpr 'int32_t';
type uint32 generates 'TNode<Uint32T>' constexpr 'uint32_t';
type int64 generates 'TNode<Int64T>' constexpr 'int64_t';
type intptr generates 'TNode<IntPtrT>' constexpr 'intptr_t';
type uintptr generates 'TNode<UintPtrT>' constexpr 'uintptr_t';
type float32 generates 'TNode<Float32T>' constexpr 'float';
type float64 generates 'TNode<Float64T>' constexpr 'double';
type bool generates 'TNode<BoolT>' constexpr 'bool';
type string constexpr 'const char*';
type int31 extends int32 generates 'TNode<Int32T>' constexpr 'int32_t';
type int31 extends int32 generates 'TNode<Int32T>' constexpr 'int31_t';
type RawPtr generates 'TNode<RawPtrT>' constexpr 'void*';
type Number extends Object generates 'TNode<Number>';
type Smi extends Number generates 'TNode<Smi>';
type HeapObject extends Object generates 'TNode<HeapObject>';
type AbstractCode extends HeapObject generates 'TNode<AbstractCode>';
type Code extends AbstractCode generates 'TNode<Code>';
type JSReceiver extends HeapObject generates 'TNode<JSReceiver>';
@ -24,108 +29,183 @@ type Context extends HeapObject generates 'TNode<Context>';
type String extends HeapObject generates 'TNode<String>';
type Oddball extends HeapObject generates 'TNode<Oddball>';
type HeapNumber extends HeapObject generates 'TNode<HeapNumber>';
type Number = Smi|HeapNumber;
type BigInt extends HeapObject generates 'TNode<BigInt>';
type Numeric = Number|BigInt;
type Boolean extends Oddball generates 'TNode<Oddball>';
type JSArray extends HeapObject generates 'TNode<JSArray>';
type Callable extends JSReceiver generates 'TNode<JSReceiver>';
type JSFunction extends Callable generates 'TNode<JSFunction>';
type JSProxy extends JSReceiver generates 'TNode<JSProxy>';
type JSObject extends JSReceiver generates 'TNode<JSObject>';
type JSArray extends JSObject generates 'TNode<JSArray>';
type JSFunction extends JSObject generates 'TNode<JSFunction>';
type JSBoundFunction extends JSObject generates 'TNode<JSBoundFunction>';
type Callable = JSFunction|JSBoundFunction|JSProxy;
type Map extends HeapObject generates 'TNode<Map>';
type FixedArrayBase extends HeapObject generates 'TNode<FixedArrayBase>';
type FixedArray extends FixedArrayBase generates 'TNode<FixedArray>';
type FixedDoubleArray extends FixedArrayBase generates
'TNode<FixedDoubleArray>';
type FixedTypedArrayBase extends FixedArrayBase generates
'TNode<FixedTypedArrayBase>';
type FixedTypedArray extends FixedTypedArrayBase generates
'TNode<FixedTypedArray>';
type NumberDictionary extends HeapObject generates 'TNode<NumberDictionary>';
type JSArrayBuffer extends Object generates 'TNode<JSArrayBuffer>';
type JSArrayBufferView extends Object generates 'TNode<JSArrayBufferView>';
type JSArrayBuffer extends JSObject generates 'TNode<JSArrayBuffer>';
type JSArrayBufferView extends JSObject generates 'TNode<JSArrayBufferView>';
type JSTypedArray extends JSArrayBufferView generates 'TNode<JSTypedArray>';
type JSDataView extends JSArrayBufferView generates 'TNode<JSDataView>';
type InstanceType extends int32 generates 'TNode<Int32T>';
type InstanceType generates 'TNode<Int32T>' constexpr 'InstanceType';
type ElementsKind generates 'TNode<Int32T>' constexpr 'ElementsKind';
type LanguageMode generates 'TNode<Smi>' constexpr 'LanguageMode';
type ExtractFixedArrayFlags generates
'TNode<Smi>' constexpr 'ExtractFixedArrayFlags';
type ParameterMode generates 'TNode<Int32T>' constexpr 'ParameterMode';
type RootListIndex generates 'TNode<Int32T>' constexpr 'Heap::RootListIndex';
type MessageTemplate;
type HasPropertyFlag generates 'HasPropertyLookupMode';
type MessageTemplate constexpr 'MessageTemplate';
type HasPropertyLookupMode constexpr 'HasPropertyLookupMode';
const PACKED_SMI_ELEMENTS: constexpr ElementsKind = 'PACKED_SMI_ELEMENTS';
const HOLEY_SMI_ELEMENTS: constexpr ElementsKind = 'HOLEY_SMI_ELEMENTS';
const PACKED_ELEMENTS: constexpr ElementsKind = 'PACKED_ELEMENTS';
const HOLEY_ELEMENTS: constexpr ElementsKind = 'HOLEY_ELEMENTS';
const PACKED_DOUBLE_ELEMENTS: constexpr ElementsKind = 'PACKED_DOUBLE_ELEMENTS';
const HOLEY_DOUBLE_ELEMENTS: constexpr ElementsKind = 'HOLEY_DOUBLE_ELEMENTS';
type ToIntegerTruncationMode constexpr 'ToIntegerTruncationMode';
const UINT8_ELEMENTS: constexpr ElementsKind = 'UINT8_ELEMENTS';
const INT8_ELEMENTS: constexpr ElementsKind = 'INT8_ELEMENTS';
const UINT16_ELEMENTS: constexpr ElementsKind = 'UINT16_ELEMENTS';
const INT16_ELEMENTS: constexpr ElementsKind = 'INT16_ELEMENTS';
const UINT32_ELEMENTS: constexpr ElementsKind = 'UINT32_ELEMENTS';
const INT32_ELEMENTS: constexpr ElementsKind = 'INT32_ELEMENTS';
const FLOAT32_ELEMENTS: constexpr ElementsKind = 'FLOAT32_ELEMENTS';
const FLOAT64_ELEMENTS: constexpr ElementsKind = 'FLOAT64_ELEMENTS';
const UINT8_CLAMPED_ELEMENTS: constexpr ElementsKind = 'UINT8_CLAMPED_ELEMENTS';
const BIGUINT64_ELEMENTS: constexpr ElementsKind = 'BIGUINT64_ELEMENTS';
const BIGINT64_ELEMENTS: constexpr ElementsKind = 'BIGINT64_ELEMENTS';
const NO_ELEMENTS: constexpr ElementsKind generates 'NO_ELEMENTS';
const kAllFixedArrays: constexpr ExtractFixedArrayFlags =
'ExtractFixedArrayFlag::kAllFixedArrays';
const PACKED_SMI_ELEMENTS: constexpr ElementsKind generates
'PACKED_SMI_ELEMENTS';
const HOLEY_SMI_ELEMENTS: constexpr ElementsKind generates 'HOLEY_SMI_ELEMENTS';
const PACKED_ELEMENTS: constexpr ElementsKind generates 'PACKED_ELEMENTS';
const HOLEY_ELEMENTS: constexpr ElementsKind generates 'HOLEY_ELEMENTS';
const PACKED_DOUBLE_ELEMENTS: constexpr ElementsKind generates
'PACKED_DOUBLE_ELEMENTS';
const HOLEY_DOUBLE_ELEMENTS: constexpr ElementsKind generates
'HOLEY_DOUBLE_ELEMENTS';
const DICTIONARY_ELEMENTS: constexpr ElementsKind generates
'DICTIONARY_ELEMENTS';
const kCOWMap: Map = 'LoadRoot(Heap::kFixedCOWArrayMapRootIndex)';
const kEmptyFixedArray: FixedArrayBase =
'UncheckedCast<FixedArrayBase>(LoadRoot(Heap::kEmptyFixedArrayRootIndex))';
const UINT8_ELEMENTS: constexpr ElementsKind generates 'UINT8_ELEMENTS';
const INT8_ELEMENTS: constexpr ElementsKind generates 'INT8_ELEMENTS';
const UINT16_ELEMENTS: constexpr ElementsKind generates 'UINT16_ELEMENTS';
const INT16_ELEMENTS: constexpr ElementsKind generates 'INT16_ELEMENTS';
const UINT32_ELEMENTS: constexpr ElementsKind generates 'UINT32_ELEMENTS';
const INT32_ELEMENTS: constexpr ElementsKind generates 'INT32_ELEMENTS';
const FLOAT32_ELEMENTS: constexpr ElementsKind generates 'FLOAT32_ELEMENTS';
const FLOAT64_ELEMENTS: constexpr ElementsKind generates 'FLOAT64_ELEMENTS';
const UINT8_CLAMPED_ELEMENTS: constexpr ElementsKind generates
'UINT8_CLAMPED_ELEMENTS';
const BIGUINT64_ELEMENTS: constexpr ElementsKind generates 'BIGUINT64_ELEMENTS';
const BIGINT64_ELEMENTS: constexpr ElementsKind generates 'BIGINT64_ELEMENTS';
const kInvalidArrayLengthMessage: MessageTemplate =
'MessageTemplate::kInvalidArrayLength';
const kCalledNonCallable: MessageTemplate =
'MessageTemplate::kCalledNonCallable';
const kCalledOnNullOrUndefined: MessageTemplate =
'MessageTemplate::kCalledOnNullOrUndefined';
type FixedUint8Array extends FixedTypedArray;
type FixedInt8Array extends FixedTypedArray;
type FixedUint16Array extends FixedTypedArray;
type FixedInt16Array extends FixedTypedArray;
type FixedUint32Array extends FixedTypedArray;
type FixedInt32Array extends FixedTypedArray;
type FixedFloat32Array extends FixedTypedArray;
type FixedFloat64Array extends FixedTypedArray;
type FixedUint8ClampedArray extends FixedTypedArray;
type FixedBigUint64Array extends FixedTypedArray;
type FixedBigInt64Array extends FixedTypedArray;
const kHasProperty: HasPropertyFlag = 'kHasProperty';
const kAllFixedArrays: constexpr ExtractFixedArrayFlags generates
'ExtractFixedArrayFlag::kAllFixedArrays';
const kMaxSafeInteger: constexpr float64 = 'kMaxSafeInteger';
const kFixedCOWArrayMapRootIndex: constexpr RootListIndex generates
'Heap::kFixedCOWArrayMapRootIndex';
const kEmptyFixedArrayRootIndex: constexpr RootListIndex generates
'Heap::kEmptyFixedArrayRootIndex';
const kNotTypedArray: MessageTemplate = 'MessageTemplate::kNotTypedArray';
const kDetachedOperation: MessageTemplate =
'MessageTemplate::kDetachedOperation';
const kBadSortComparisonFunction: MessageTemplate =
'MessageTemplate::kBadSortComparisonFunction';
const kInvalidArrayLength: constexpr MessageTemplate generates
'MessageTemplate::kInvalidArrayLength';
const kCalledNonCallable: constexpr MessageTemplate generates
'MessageTemplate::kCalledNonCallable';
const kCalledOnNullOrUndefined: constexpr MessageTemplate generates
'MessageTemplate::kCalledOnNullOrUndefined';
const Hole: Oddball = 'TheHoleConstant()';
const Null: Oddball = 'NullConstant()';
const Undefined: Oddball = 'UndefinedConstant()';
const True: Boolean = 'TrueConstant()';
const False: Boolean = 'FalseConstant()';
const true: constexpr bool = 'true';
const false: constexpr bool = 'false';
const kHasProperty: constexpr HasPropertyLookupMode generates 'kHasProperty';
const strict: constexpr LanguageMode = 'LanguageMode::kStrict';
const sloppy: constexpr LanguageMode = 'LanguageMode::kSloppy';
const kMaxSafeInteger: constexpr float64 generates 'kMaxSafeInteger';
const kTruncateMinusZero: constexpr ToIntegerTruncationMode generates
'ToIntegerTruncationMode::kTruncateMinusZero';
const kNotTypedArray: constexpr MessageTemplate generates
'MessageTemplate::kNotTypedArray';
const kDetachedOperation: constexpr MessageTemplate generates
'MessageTemplate::kDetachedOperation';
const kBadSortComparisonFunction: constexpr MessageTemplate generates
'MessageTemplate::kBadSortComparisonFunction';
const kIncompatibleMethodReceiver: constexpr MessageTemplate generates
'MessageTemplate::kIncompatibleMethodReceiver';
const kInvalidDataViewAccessorOffset: constexpr MessageTemplate generates
'MessageTemplate::kInvalidDataViewAccessorOffset';
const kStrictReadOnlyProperty: constexpr MessageTemplate generates
'MessageTemplate::kStrictReadOnlyProperty';
extern macro TheHoleConstant(): Oddball;
extern macro NullConstant(): Oddball;
extern macro UndefinedConstant(): Oddball;
extern macro TrueConstant(): Boolean;
extern macro FalseConstant(): Boolean;
const Hole: Oddball = TheHoleConstant();
const Null: Oddball = NullConstant();
const Undefined: Oddball = UndefinedConstant();
const True: Boolean = TrueConstant();
const False: Boolean = FalseConstant();
const true: constexpr bool generates 'true';
const false: constexpr bool generates 'false';
const kStrict: constexpr LanguageMode generates 'LanguageMode::kStrict';
const kSloppy: constexpr LanguageMode generates 'LanguageMode::kSloppy';
const SMI_PARAMETERS: constexpr ParameterMode generates 'SMI_PARAMETERS';
const INTPTR_PARAMETERS: constexpr ParameterMode generates 'INTPTR_PARAMETERS';
extern macro Is64(): constexpr bool;
extern macro Print(constexpr string);
extern macro Print(constexpr string, Object);
extern macro Print(Object);
extern macro DebugBreak();
extern macro ToInteger_Inline(Context, Object): Number;
extern macro ToInteger_Inline(
Context, Object, constexpr ToIntegerTruncationMode): Number;
extern macro ToLength_Inline(Context, Object): Number;
extern macro ToNumber_Inline(Context, Object): Number;
extern macro ToString_Inline(Context, Object): String;
extern macro GetProperty(Context, Object, Object): Object;
extern macro HasProperty(HeapObject, Object, Context, HasPropertyFlag): Oddball;
extern macro ThrowRangeError(Context, MessageTemplate): never;
extern macro ThrowTypeError(Context, MessageTemplate): never;
extern macro ThrowTypeError(Context, MessageTemplate, Object): never;
extern macro HasProperty(
HeapObject, Object, Context, constexpr HasPropertyLookupMode): Oddball;
extern macro ThrowRangeError(Context, constexpr MessageTemplate): never;
extern macro ThrowTypeError(Context, constexpr MessageTemplate): never;
extern macro ThrowTypeError(Context, constexpr MessageTemplate, Object): never;
extern macro ThrowTypeError(Context, constexpr MessageTemplate, Object, Object,
Object): never;
extern macro ArraySpeciesCreate(Context, Object, Number): Object;
extern macro EnsureArrayPushable(Map): ElementsKind labels Bailout;
extern builtin ToObject(Context, Object): Object;
extern builtin ToObject(Context, Object): JSReceiver;
extern macro IsNullOrUndefined(Object): bool;
extern macro IsTheHole(Object): bool;
extern macro IsString(HeapObject): bool;
extern builtin ToString(Context, Object): String;
extern runtime CreateDataProperty(Context, Object, Object, Object);
extern runtime SetProperty(Context, Object, Object, Object, LanguageMode);
extern runtime DeleteProperty(Context, Object, Object, LanguageMode);
extern runtime StringEqual(Context, String, String): Oddball;
extern macro LoadRoot(constexpr RootListIndex): Object;
extern macro StoreRoot(constexpr RootListIndex, Object): Object;
extern macro LoadAndUntagToWord32Root(constexpr RootListIndex): int32;
extern runtime StringEqual(Context, String, String): Oddball;
extern builtin StringLessThan(Context, String, String): Boolean;
extern macro StrictEqual(Object, Object): Boolean;
extern runtime SmiLexicographicCompare(Context, Object, Object): Number;
extern operator '==' macro Word32Equal(int32, int32): bool;
extern operator '!=' macro Word32NotEqual(int32, int32): bool;
extern operator '<' macro Int32LessThan(int32, int32): bool;
extern operator '>' macro Int32GreaterThan(int32, int32): bool;
extern operator '<=' macro Int32LessThanOrEqual(int32, int32): bool;
@ -140,18 +220,24 @@ extern operator '>=' macro SmiGreaterThanOrEqual(Smi, Smi): bool;
extern operator '==' macro ElementsKindEqual(
constexpr ElementsKind, constexpr ElementsKind): constexpr bool;
extern operator '==' macro ElementsKindEqual(ElementsKind, ElementsKind): bool;
extern macro IsFastElementsKind(constexpr ElementsKind): constexpr bool;
extern macro IsDoubleElementsKind(constexpr ElementsKind): constexpr bool;
extern macro SmiAbove(Smi, Smi): bool;
extern operator '==' macro WordEqual(intptr, intptr): bool;
extern operator '==' macro WordEqual(uintptr, uintptr): bool;
extern operator '!=' macro WordNotEqual(intptr, intptr): bool;
extern operator '!=' macro WordNotEqual(uintptr, uintptr): bool;
extern operator '<' macro IntPtrLessThan(intptr, intptr): bool;
extern operator '>' macro IntPtrGreaterThan(intptr, intptr): bool;
extern operator '<=' macro IntPtrLessThanOrEqual(intptr, intptr): bool;
extern operator '>=' macro IntPtrGreaterThanOrEqual(intptr, intptr): bool;
extern operator '>=' macro UintPtrGreaterThanOrEqual(uintptr, uintptr): bool;
extern operator '==' macro Float64Equal(float64, float64): bool;
extern operator '!=' macro Float64NotEqual(float64, float64): bool;
extern operator
'<' macro BranchIfNumberLessThan(Number, Number): never labels Taken, NotTaken;
@ -170,11 +256,34 @@ extern operator '!=' macro WordNotEqual(Object, Object): bool;
extern operator '+' macro SmiAdd(Smi, Smi): Smi;
extern operator '-' macro SmiSub(Smi, Smi): Smi;
extern operator '&' macro SmiAnd(Smi, Smi): Smi;
extern operator '>>>' macro SmiShr(Smi, constexpr int31): Smi;
extern operator '+' macro IntPtrAdd(intptr, intptr): intptr;
extern operator '-' macro IntPtrSub(intptr, intptr): intptr;
extern operator '>>>' macro WordShr(intptr, intptr): intptr;
extern operator '>>>' macro WordShr(uintptr, uintptr): uintptr;
extern operator '<<' macro WordShl(intptr, intptr): intptr;
extern operator '&' macro WordAnd(intptr, intptr): intptr;
extern operator '&' macro WordAnd(uintptr, uintptr): uintptr;
extern operator '+' macro Int32Add(int32, int32): int32;
extern operator '-' macro Int32Sub(int32, int32): int32;
extern operator '*' macro Int32Mul(int32, int32): int32;
extern operator '%' macro Int32Mod(int32, int32): int32;
extern operator '&' macro Word32And(int32, int32): int32;
extern operator '&' macro Word32And(uint32, uint32): uint32;
extern operator '==' macro
ConstexprInt31Equal(constexpr int31, constexpr int31): constexpr bool;
extern operator '==' macro Word32Equal(int32, int32): bool;
extern operator '==' macro Word32Equal(uint32, uint32): bool;
extern operator '!=' macro Word32NotEqual(int32, int32): bool;
extern operator '!=' macro Word32NotEqual(uint32, uint32): bool;
extern operator '>>>' macro Word32Shr(uint32, uint32): uint32;
extern operator '<<' macro Word32Shl(int32, int32): int32;
extern operator '<<' macro Word32Shl(uint32, uint32): uint32;
extern operator '|' macro Word32Or(int32, int32): int32;
extern operator '|' macro Word32Or(uint32, uint32): uint32;
extern operator '+' macro NumberAdd(Number, Number): Number;
extern operator '-' macro NumberSub(Number, Number): Number;
@ -186,51 +295,286 @@ extern operator '!' macro Word32BinaryNot(bool): bool;
extern operator '.map' macro LoadMap(HeapObject): Map;
extern operator '.map=' macro StoreMap(HeapObject, Map);
extern operator '.instanceType' macro LoadInstanceType(Object): InstanceType;
extern operator
'.instanceType' macro LoadInstanceType(HeapObject): InstanceType;
extern operator '.length' macro LoadStringLengthAsWord(String): intptr;
extern operator '.length' macro GetArgumentsLength(Arguments): intptr;
extern operator '[]' macro GetArgumentValue(Arguments, intptr): Object;
extern operator '[]' macro GetArgumentValueSmiIndex(Arguments, Smi): Object;
extern operator '.length' macro GetArgumentsLength(constexpr Arguments): intptr;
extern operator
'[]' macro GetArgumentValue(constexpr Arguments, intptr): Object;
extern operator 'is<Smi>' macro TaggedIsSmi(Object): bool;
extern operator 'isnt<Smi>' macro TaggedIsNotSmi(Object): bool;
extern macro TaggedIsPositiveSmi(Object): bool;
extern operator
'cast<>' macro TaggedToHeapObject(Object): HeapObject labels CastError;
extern operator 'cast<>' macro TaggedToSmi(Object): Smi labels CastError;
extern operator
'cast<>' macro TaggedToJSArray(Object): JSArray labels CastError;
extern operator
'cast<>' macro TaggedToCallable(Object): Callable labels CastError;
extern operator 'cast<>' macro ConvertFixedArrayBaseToFixedArray(
FixedArrayBase): FixedArray labels CastError;
extern operator 'cast<>' macro ConvertFixedArrayBaseToFixedDoubleArray(
FixedArrayBase): FixedDoubleArray labels CastError;
extern macro TaggedToJSDataView(Object): JSDataView labels CastError;
extern macro TaggedToHeapObject(Object): HeapObject labels CastError;
extern macro TaggedToSmi(Object): Smi labels CastError;
extern macro TaggedToJSArray(Object): JSArray labels CastError;
extern macro TaggedToCallable(Object): Callable labels CastError;
extern macro ConvertFixedArrayBaseToFixedArray(FixedArrayBase):
FixedArray labels CastError;
extern macro ConvertFixedArrayBaseToFixedDoubleArray(FixedArrayBase):
FixedDoubleArray labels CastError;
extern macro TaggedToNumber(Object): Number labels CastError;
macro cast<A : type>(o: Object): A labels CastError;
cast<Number>(o: Object): Number labels CastError {
return TaggedToNumber(o) otherwise CastError;
}
cast<HeapObject>(o: Object): HeapObject labels CastError {
return TaggedToHeapObject(o) otherwise CastError;
}
cast<Smi>(o: Object): Smi labels CastError {
return TaggedToSmi(o) otherwise CastError;
}
cast<JSDataView>(o: Object): JSDataView labels CastError {
return TaggedToJSDataView(o) otherwise CastError;
}
cast<Callable>(o: Object): Callable labels CastError {
return TaggedToCallable(o) otherwise CastError;
}
cast<JSArray>(o: Object): JSArray labels CastError {
return TaggedToJSArray(o) otherwise CastError;
}
macro cast<A : type>(o: FixedArrayBase): A labels CastError;
cast<FixedArray>(o: FixedArrayBase): FixedArray labels CastError {
return ConvertFixedArrayBaseToFixedArray(o) otherwise CastError;
}
cast<FixedDoubleArray>(o: FixedArrayBase): FixedDoubleArray labels CastError {
return ConvertFixedArrayBaseToFixedDoubleArray(o) otherwise CastError;
}
extern macro AllocateHeapNumberWithValue(float64): HeapNumber;
extern macro ChangeInt32ToTagged(int32): Number;
extern macro ChangeUint32ToTagged(uint32): Number;
extern macro Unsigned(int32): uint32;
extern macro Unsigned(intptr): uintptr;
extern macro Unsigned(RawPtr): uintptr;
extern macro Signed(uint32): int32;
extern macro Signed(uintptr): intptr;
extern macro Signed(RawPtr): intptr;
extern macro TruncateIntPtrToInt32(intptr): int32;
extern macro SmiTag(intptr): Smi;
extern macro SmiFromInt32(int32): Smi;
extern macro SmiUntag(Smi): intptr;
extern macro SmiToInt32(Smi): int32;
extern macro RoundIntPtrToFloat64(intptr): float64;
extern macro LoadHeapNumberValue(HeapNumber): float64;
extern macro ChangeFloat32ToFloat64(float32): float64;
extern macro ChangeNumberToFloat64(Number): float64;
extern macro ChangeFloat64ToUintPtr(float64): uintptr;
extern macro ChangeInt32ToIntPtr(int32): intptr; // Sign-extends.
extern macro ChangeUint32ToWord(uint32): uintptr; // Doesn't sign-extend.
extern implicit operator
'convert<>' macro AllocateHeapNumberWithValue(constexpr float64): Number;
extern implicit operator
'convert<>' macro IntPtrConstant(constexpr int31): intptr;
extern implicit operator
'convert<>' macro Int32Constant(constexpr int31): int32;
extern implicit operator 'convert<>' macro SmiConstant(constexpr int31): Smi;
extern implicit operator
'convert<>' macro NumberConstant(constexpr int31): Number;
extern implicit operator 'convert<>' macro BoolConstant(constexpr bool): bool;
extern implicit operator 'convert<>' macro LanguageModeConstant(
constexpr LanguageMode): LanguageMode;
extern macro NumberConstant(constexpr float64): Number;
extern macro NumberConstant(constexpr int32): Number;
extern macro IntPtrConstant(constexpr int31): intptr;
extern macro IntPtrConstant(constexpr int32): intptr;
extern macro Int32Constant(constexpr int31): int31;
extern macro Int32Constant(constexpr int32): int32;
extern macro Float64Constant(constexpr int31): float64;
extern macro SmiConstant(constexpr int31): Smi;
extern macro BoolConstant(constexpr bool): bool;
extern macro StringConstant(constexpr string): String;
extern macro LanguageModeConstant(constexpr LanguageMode): LanguageMode;
extern macro Int32Constant(constexpr ElementsKind): ElementsKind;
extern implicit operator 'convert<>' macro SmiFromInt32(ElementsKind): Smi;
macro from_constexpr<A : type>(o: constexpr int31): A;
from_constexpr<intptr>(i: constexpr int31): intptr {
return IntPtrConstant(i);
}
from_constexpr<int31>(i: constexpr int31): int31 {
return Int32Constant(i);
}
from_constexpr<int32>(i: constexpr int31): int32 {
return Int32Constant(i);
}
from_constexpr<uint32>(i: constexpr int31): uint32 {
return Unsigned(Int32Constant(i));
}
from_constexpr<uintptr>(i: constexpr int31): uintptr {
return ChangeUint32ToWord(i);
}
from_constexpr<Smi>(i: constexpr int31): Smi {
return SmiConstant(i);
}
from_constexpr<Number>(i: constexpr int31): Number {
return SmiConstant(i);
}
from_constexpr<float64>(i: constexpr int31): float64 {
return Float64Constant(i);
}
macro from_constexpr<A : type>(o: constexpr int32): A;
from_constexpr<intptr>(i: constexpr int32): intptr {
return IntPtrConstant(i);
}
from_constexpr<int32>(i: constexpr int32): int32 {
return Int32Constant(i);
}
from_constexpr<Number>(i: constexpr int32): Number {
return NumberConstant(i);
}
macro from_constexpr<A : type>(o: constexpr float64): A;
from_constexpr<Number>(f: constexpr float64): Number {
return NumberConstant(f);
}
macro from_constexpr<A : type>(b: constexpr bool): A;
from_constexpr<bool>(b: constexpr bool): bool {
return BoolConstant(b);
}
macro from_constexpr<A : type>(l: constexpr LanguageMode): A;
from_constexpr<LanguageMode>(b: constexpr LanguageMode): LanguageMode {
return LanguageModeConstant(b);
}
macro from_constexpr<A : type>(e: constexpr ElementsKind): A;
from_constexpr<ElementsKind>(e: constexpr ElementsKind): ElementsKind {
return Int32Constant(e);
}
macro from_constexpr<A : type>(s: constexpr string): A;
from_constexpr<String>(s: constexpr string): String {
return StringConstant(s);
}
from_constexpr<Object>(s: constexpr string): Object {
return StringConstant(s);
}
extern operator 'convert<>' macro ChangeInt32ToTagged(int32): Number;
extern operator 'convert<>' macro TruncateWordToWord32(intptr): int32;
extern operator 'convert<>' macro SmiTag(intptr): Smi;
extern operator 'convert<>' macro SmiFromInt32(int32): Smi;
extern operator 'convert<>' macro SmiUntag(Smi): intptr;
macro convert<A : type>(i: constexpr int31): A {
return i;
}
macro convert<A : type>(i: int32): A;
convert<Number>(i: int32): Number {
return ChangeInt32ToTagged(i);
}
convert<intptr>(i: int32): intptr {
return ChangeInt32ToIntPtr(i);
}
convert<Smi>(i: int32): Smi {
return SmiFromInt32(i);
}
macro convert<A : type>(ui: uint32): A;
convert<Number>(ui: uint32): Number {
return ChangeUint32ToTagged(ui);
}
convert<Smi>(ui: uint32): Smi {
return SmiFromInt32(Signed(ui));
}
convert<uintptr>(ui: uint32): uintptr {
return ChangeUint32ToWord(ui);
}
macro convert<A : type>(i: intptr): A;
convert<int32>(i: intptr): int32 {
return TruncateIntPtrToInt32(i);
}
convert<Smi>(i: intptr): Smi {
return SmiTag(i);
}
macro convert<A : type>(ui: uintptr): A;
convert<uint32>(ui: uintptr): uint32 {
return Unsigned(TruncateIntPtrToInt32(Signed(ui)));
}
macro convert<A : type>(s: Smi): A;
convert<intptr>(s: Smi): intptr {
return SmiUntag(s);
}
convert<int32>(s: Smi): int32 {
return SmiToInt32(s);
}
macro convert<A : type>(h: HeapNumber): A;
convert<float64>(h: HeapNumber): float64 {
return LoadHeapNumberValue(h);
}
macro convert<A : type>(n: Number): A;
convert<float64>(n: Number): float64 {
return ChangeNumberToFloat64(n);
}
macro convert<A : type>(f: float32): A;
convert<float64>(f: float32): float64 {
return ChangeFloat32ToFloat64(f);
}
macro convert<A : type>(d: float64): A;
convert<Number>(d: float64): Number {
return AllocateHeapNumberWithValue(d);
}
convert<uintptr>(d: float64): uintptr {
return ChangeFloat64ToUintPtr(d);
}
macro convert<A : type>(r: RawPtr): A;
convert<uintptr>(r: RawPtr): uintptr {
return Unsigned(r);
}
convert<intptr>(r: RawPtr): intptr {
return Signed(r);
}
extern macro UnsafeCastNumberToHeapNumber(Number): HeapNumber;
extern macro UnsafeCastObjectToFixedArrayBase(Object): FixedArrayBase;
extern macro UnsafeCastObjectToFixedArray(Object): FixedArray;
extern macro UnsafeCastObjectToFixedDoubleArray(Object): FixedDoubleArray;
extern macro UnsafeCastObjectToHeapNumber(Object): HeapNumber;
extern macro UnsafeCastObjectToCallable(Object): Callable;
extern macro UnsafeCastObjectToSmi(Object): Smi;
extern macro UnsafeCastObjectToNumber(Object): Number;
extern macro UnsafeCastObjectToHeapObject(Object): HeapObject;
extern macro UnsafeCastObjectToJSArray(Object): JSArray;
extern macro UnsafeCastObjectToFixedTypedArrayBase(Object): FixedTypedArrayBase;
extern macro UnsafeCastObjectToNumberDictionary(Object): NumberDictionary;
extern macro UnsafeCastObjectToJSReceiver(Object): JSReceiver;
extern macro UnsafeCastObjectToJSObject(Object): JSObject;
extern macro UnsafeCastObjectToMap(Object): Map;
macro unsafe_cast<A : type>(n: Number): A;
unsafe_cast<HeapNumber>(n: Number): HeapNumber {
return UnsafeCastNumberToHeapNumber(n);
}
macro unsafe_cast<A : type>(o: Object): A;
unsafe_cast<FixedArray>(o: Object): FixedArray {
return UnsafeCastObjectToFixedArray(o);
}
unsafe_cast<FixedDoubleArray>(o: Object): FixedDoubleArray {
return UnsafeCastObjectToFixedDoubleArray(o);
}
unsafe_cast<HeapNumber>(o: Object): HeapNumber {
return UnsafeCastObjectToHeapNumber(o);
}
unsafe_cast<Callable>(o: Object): Callable {
return UnsafeCastObjectToCallable(o);
}
unsafe_cast<Smi>(o: Object): Smi {
return UnsafeCastObjectToSmi(o);
}
unsafe_cast<Number>(o: Object): Number {
return UnsafeCastObjectToNumber(o);
}
unsafe_cast<HeapObject>(o: Object): HeapObject {
return UnsafeCastObjectToHeapObject(o);
}
unsafe_cast<JSArray>(o: Object): JSArray {
return UnsafeCastObjectToJSArray(o);
}
unsafe_cast<FixedTypedArrayBase>(o: Object): FixedTypedArrayBase {
return UnsafeCastObjectToFixedTypedArrayBase(o);
}
unsafe_cast<NumberDictionary>(o: Object): NumberDictionary {
return UnsafeCastObjectToNumberDictionary(o);
}
unsafe_cast<JSReceiver>(o: Object): JSReceiver {
return UnsafeCastObjectToJSReceiver(o);
}
unsafe_cast<JSObject>(o: Object): JSObject {
return UnsafeCastObjectToJSObject(o);
}
unsafe_cast<Map>(o: Object): Map {
return UnsafeCastObjectToMap(o);
}
unsafe_cast<FixedArrayBase>(o: Object): FixedArrayBase {
return UnsafeCastObjectToFixedArrayBase(o);
}
const kCOWMap: Map = unsafe_cast<Map>(LoadRoot(kFixedCOWArrayMapRootIndex));
const kEmptyFixedArray: FixedArrayBase = unsafe_cast<FixedArrayBase>(
LoadRoot(kEmptyFixedArrayRootIndex));
extern macro BranchIfFastJSArray(Object, Context): never labels Taken, NotTaken;
extern macro BranchIfNotFastJSArray(Object, Context): never labels Taken,
@ -251,28 +595,51 @@ extern operator '.elements_kind' macro LoadMapElementsKind(Map): ElementsKind;
extern operator
'.elements_kind' macro LoadElementsKind(JSTypedArray): ElementsKind;
extern operator '.elements' macro LoadElements(Object): FixedArrayBase;
extern operator '.elements=' macro StoreElements(Object, FixedArrayBase);
extern operator '.elements' macro LoadElements(JSObject): FixedArrayBase;
extern operator '.elements=' macro StoreElements(JSObject, FixedArrayBase);
extern operator '.length' macro LoadTypedArrayLength(JSTypedArray): Smi;
extern operator '.length' macro LoadJSArrayLength(JSArray): Number;
extern operator '.length_fast' macro LoadFastJSArrayLength(JSArray): Smi;
extern operator '.length=' macro StoreJSArrayLength(JSArray, Smi);
extern operator '.length' macro LoadFixedArrayBaseLength(FixedArrayBase): Smi;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, intptr): Object;
extern operator '[]' macro LoadFixedArrayElement(FixedArray, Smi): Object;
extern operator
'[]' macro LoadFixedArrayElementInt(FixedArray, constexpr int31): Object;
extern operator
'[]=' macro StoreFixedArrayElement(FixedArray, intptr, Object): void;
extern operator
'[]=' macro StoreFixedArrayElementInt(
FixedArray, constexpr int31, Object): void;
extern operator
'[]=' macro StoreFixedArrayElementSmi(FixedArray, Smi, Object): void;
extern operator '.instance_type' macro LoadMapInstanceType(Map): int32;
extern macro LoadFixedDoubleArrayElement(FixedDoubleArray, Smi): float64;
extern macro Float64SilenceNaN(float64): float64;
extern macro StoreFixedDoubleArrayElement(
FixedDoubleArray, Object, float64, constexpr ParameterMode);
macro StoreFixedDoubleArrayElementWithSmiIndex(
array: FixedDoubleArray, index: Smi, value: float64) {
StoreFixedDoubleArrayElement(array, index, value, SMI_PARAMETERS);
}
extern macro BasicLoadNumberDictionaryElement(NumberDictionary, intptr):
Object labels NotData, IfHole;
extern macro BasicStoreNumberDictionaryElement(NumberDictionary, intptr, Object)
labels NotData, IfHole, ReadOnly;
extern macro IsFastElementsKind(ElementsKind): bool;
extern macro IsDoubleElementsKind(ElementsKind): bool;
extern macro IsFastSmiOrTaggedElementsKind(ElementsKind): bool;
extern macro IsFastSmiElementsKind(ElementsKind): bool;
extern macro IsHoleyFastElementsKind(ElementsKind): bool;
extern macro AllocateFixedArray(constexpr ElementsKind, Smi): FixedArray;
extern macro AllocateFixedArray(constexpr ElementsKind, Smi, Map): FixedArray;
extern macro AllocateFixedArray(constexpr ElementsKind, intptr): FixedArray;
extern macro CopyFixedArrayElements(
constexpr ElementsKind, FixedArray, constexpr ElementsKind, FixedArray,
@ -289,17 +656,19 @@ extern macro IsElementsKindGreaterThan(
extern macro LoadDoubleWithHoleCheck(FixedDoubleArray, Smi): float64
labels IfHole;
extern macro Call(Context, Callable, Object, ...): Object;
extern macro Call(Context, Callable, Object): Object;
extern macro Call(Context, Callable, Object, Object): Object;
extern macro Call(Context, Callable, Object, Object, Object): Object;
extern macro Call(Context, Callable, Object, Object, Object, Object): Object;
extern macro Call(Context, Callable, Object, Object, Object, Object, Object): Object;
extern macro Call(Context, Callable, Object, Object, Object, Object, Object, Object): Object;
extern macro ExtractFixedArray(
FixedArray, Smi, Smi, Smi, constexpr ExtractFixedArrayFlags): FixedArray;
extern builtin ExtractFastJSArray(Context, JSArray, Smi, Smi): JSArray;
macro LoadElementNoHole<T : type>(a: JSArray, index: Smi): Object
labels IfHole {
unreachable;
}
macro LoadElementNoHole<T : type>(a: JSArray, index: Smi): Object labels IfHole;
LoadElementNoHole<FixedArray>(a: JSArray, index: Smi): Object
labels IfHole {
@ -331,9 +700,10 @@ labels IfHole {
}
macro HasPropertyObject(
o: Object, p: Object, c: Context, f: HasPropertyFlag): Oddball {
o: Object, p: Object, c: Context,
f: constexpr HasPropertyLookupMode): Oddball {
try {
return HasProperty(cast<HeapObject>(o) otherwise CastError, p, c, f);
return HasProperty((cast<HeapObject>(o) otherwise CastError), p, c, f);
}
label CastError {
return False;
@ -341,5 +711,41 @@ macro HasPropertyObject(
}
extern macro IsCallable(HeapObject): bool;
extern macro IsJSArray(HeapObject): bool;
extern macro TaggedIsCallable(Object): bool;
extern macro IsDetachedBuffer(JSArrayBuffer): bool;
extern macro IsHeapNumber(HeapObject): bool;
extern macro IsExtensibleMap(Map): bool;
extern macro IsCustomElementsReceiverInstanceType(int32): bool;
extern macro Typeof(Object): Object;
// Return true iff number is NaN.
macro NumberIsNaN(number: Number): bool {
if (TaggedIsSmi(number)) return false;
let value: float64 = convert<float64>(unsafe_cast<HeapNumber>(number));
return value != value;
}
extern macro BranchIfToBooleanIsTrue(Object): never labels Taken, NotTaken;
macro ToBoolean(obj: Object): bool {
if (BranchIfToBooleanIsTrue(obj)) {
return true;
} else {
return false;
}
}
macro ToIndex(input: Object, context: Context): Number labels RangeError {
if (input == Undefined) {
return 0;
}
let value: Number = ToInteger_Inline(context, input, kTruncateMinusZero);
if (value < 0 || value > kMaxSafeInteger) {
goto RangeError;
}
return value;
}
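Taken together, the typed cast<> / convert<> specializations above replace the old implicit 'convert<>' and 'cast<>' operators with explicit generic macros. A minimal usage sketch, assuming a hypothetical FastArrayLength macro that is not part of this patch and uses only the helpers declared above:

// Hypothetical sketch only: cast<> with an otherwise-label plus convert<>
// for the Smi-to-intptr move, mirroring the style of HasPropertyObject.
macro FastArrayLength(o: Object): intptr labels CastError {
  // Bails out to CastError when o is not a fast JSArray.
  let a: JSArray = (cast<JSArray>(o) otherwise CastError);
  // '.length_fast' yields a Smi; convert<intptr> untags it.
  return convert<intptr>(a.length_fast);
}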

View File

@ -64,7 +64,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
ObjectTemplateInfo::cast(fun_data->instance_template()), isolate);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, js_receiver,
ApiNatives::InstantiateObject(instance_template,
ApiNatives::InstantiateObject(isolate, instance_template,
Handle<JSReceiver>::cast(new_target)),
Object);
args[0] = *js_receiver;
@ -80,7 +80,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> HandleApiCallHelper(
// Proxies never need access checks.
DCHECK(js_receiver->IsJSObject());
Handle<JSObject> js_obj_receiver = Handle<JSObject>::cast(js_receiver);
if (!isolate->MayAccess(handle(isolate->context()), js_obj_receiver)) {
if (!isolate->MayAccess(handle(isolate->context(), isolate),
js_obj_receiver)) {
isolate->ReportFailedAccessCheck(js_obj_receiver);
RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
return isolate->factory()->undefined_value();
@ -222,7 +223,8 @@ MaybeHandle<Object> Builtins::InvokeApiFunction(Isolate* isolate,
argv[cursor--] = *args[i];
}
DCHECK_EQ(cursor, BuiltinArguments::kPaddingOffset);
argv[BuiltinArguments::kPaddingOffset] = isolate->heap()->the_hole_value();
argv[BuiltinArguments::kPaddingOffset] =
ReadOnlyRoots(isolate).the_hole_value();
argv[BuiltinArguments::kArgcOffset] = Smi::FromInt(frame_argc);
argv[BuiltinArguments::kTargetOffset] = *function;
argv[BuiltinArguments::kNewTargetOffset] = *new_target;
@ -260,7 +262,7 @@ V8_WARN_UNUSED_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
// right answer.
new_target = obj;
} else {
new_target = isolate->heap()->undefined_value();
new_target = ReadOnlyRoots(isolate).undefined_value();
}
// Get the invocation callback from the function descriptor that was
@ -284,7 +286,7 @@ V8_WARN_UNUSED_RESULT static Object* HandleApiCallAsFunctionOrConstructor(
args.length() - 1);
Handle<Object> result_handle = custom.Call(call_data);
if (result_handle.is_null()) {
result = isolate->heap()->undefined_value();
result = ReadOnlyRoots(isolate).undefined_value();
} else {
result = *result_handle;
}

View File

@ -12,6 +12,7 @@
#include "src/frame-constants.h"
#include "src/interface-descriptors.h"
#include "src/objects-inl.h"
#include "src/objects/arguments.h"
namespace v8 {
namespace internal {
@ -44,7 +45,7 @@ ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
CSA_SLOW_ASSERT(this, HasInstanceType(shared, SHARED_FUNCTION_INFO_TYPE));
Node* formal_parameter_count =
LoadObjectField(shared, SharedFunctionInfo::kFormalParameterCountOffset,
MachineType::Int32());
MachineType::Uint16());
formal_parameter_count = Int32ToParameter(formal_parameter_count, mode);
argument_count.Bind(formal_parameter_count);

View File

@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins-array-gen.h"
#include "src/builtins/builtins-iterator-gen.h"
#include "src/builtins/builtins-string-gen.h"
#include "src/builtins/builtins-typed-array-gen.h"
@ -10,8 +12,7 @@
#include "src/code-stub-assembler.h"
#include "src/frame-constants.h"
#include "src/heap/factory-inl.h"
#include "src/builtins/builtins-array-gen.h"
#include "src/objects/arguments-inl.h"
namespace v8 {
namespace internal {
@ -211,10 +212,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
context(), original_array, length, method_name);
// In the Spec and our current implementation, the length check is already
// performed in TypedArraySpeciesCreate.
CSA_ASSERT(
this,
SmiLessThanOrEqual(
CAST(len_), CAST(LoadObjectField(a, JSTypedArray::kLengthOffset))));
CSA_ASSERT(this, SmiLessThanOrEqual(CAST(len_), LoadTypedArrayLength(a)));
fast_typed_array_target_ =
Word32Equal(LoadInstanceType(LoadElements(original_array)),
LoadInstanceType(LoadElements(a)));
@ -248,7 +246,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
transition_smi_double(this);
Label array_not_smi(this), array_fast(this), array_double(this);
Node* kind = LoadMapElementsKind(LoadMap(a()));
TNode<Int32T> kind = LoadElementsKind(a());
Node* elements = LoadElements(a());
GotoIf(IsElementsKindGreaterThan(kind, HOLEY_SMI_ELEMENTS), &array_not_smi);
TryStoreArrayElement(HOLEY_SMI_ELEMENTS, mode, &transition_pre, elements, k,
@ -296,9 +294,18 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
Node* const native_context = LoadNativeContext(context());
Node* const double_map = LoadContextElement(
native_context, Context::JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX);
CallStub(CodeFactory::TransitionElementsKind(
isolate(), HOLEY_SMI_ELEMENTS, HOLEY_DOUBLE_ELEMENTS, true),
context(), a(), double_map);
const ElementsKind kFromKind = HOLEY_SMI_ELEMENTS;
const ElementsKind kToKind = HOLEY_DOUBLE_ELEMENTS;
const bool kIsJSArray = true;
Label transition_in_runtime(this, Label::kDeferred);
TransitionElementsKind(a(), double_map, kFromKind, kToKind, kIsJSArray,
&transition_in_runtime);
Goto(&array_double);
BIND(&transition_in_runtime);
CallRuntime(Runtime::kTransitionElementsKind, context(), a(), double_map);
Goto(&array_double);
}
@ -419,28 +426,34 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
GotoIf(DoesntHaveInstanceType(o(), JS_ARRAY_TYPE), &not_js_array);
merged_length = LoadJSArrayLength(CAST(o()));
Goto(&has_length);
BIND(&not_js_array);
Node* len_property =
GetProperty(context(), o(), isolate()->factory()->length_string());
merged_length = ToLength_Inline(context(), len_property);
Goto(&has_length);
{
Node* len_property =
GetProperty(context(), o(), isolate()->factory()->length_string());
merged_length = ToLength_Inline(context(), len_property);
Goto(&has_length);
}
BIND(&has_length);
len_ = merged_length.value();
{
len_ = merged_length.value();
// 5. If IsCallable(callbackfn) is false, throw a TypeError exception.
Label type_exception(this, Label::kDeferred);
Label done(this);
GotoIf(TaggedIsSmi(callbackfn()), &type_exception);
Branch(IsCallableMap(LoadMap(callbackfn())), &done, &type_exception);
// 5. If IsCallable(callbackfn) is false, throw a TypeError exception.
Label type_exception(this, Label::kDeferred);
Label done(this);
GotoIf(TaggedIsSmi(callbackfn()), &type_exception);
Branch(IsCallableMap(LoadMap(callbackfn())), &done, &type_exception);
BIND(&throw_null_undefined_exception);
ThrowTypeError(context(), MessageTemplate::kCalledOnNullOrUndefined, name);
BIND(&throw_null_undefined_exception);
ThrowTypeError(context(), MessageTemplate::kCalledOnNullOrUndefined,
name);
BIND(&type_exception);
ThrowTypeError(context(), MessageTemplate::kCalledNonCallable,
callbackfn());
BIND(&type_exception);
ThrowTypeError(context(), MessageTemplate::kCalledNonCallable,
callbackfn());
BIND(&done);
BIND(&done);
}
// 6. If thisArg was supplied, let T be thisArg; else let T be undefined.
// [Already done by the arguments adapter]
@ -501,7 +514,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
LoadObjectField(typed_array, JSTypedArray::kBufferOffset);
GotoIf(IsDetachedBuffer(array_buffer), &throw_detached);
len_ = LoadObjectField<Smi>(typed_array, JSTypedArray::kLengthOffset);
len_ = LoadTypedArrayLength(typed_array);
Label throw_not_callable(this, Label::kDeferred);
Label distinguish_types(this);
@ -545,6 +558,7 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
} else {
k_.Bind(NumberDec(len()));
}
CSA_ASSERT(this, IsSafeInteger(k()));
Node* instance_type = LoadInstanceType(LoadElements(typed_array));
Switch(instance_type, &unexpected_instance_type, instance_types.data(),
label_ptrs.data(), labels.size());
@ -586,10 +600,9 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
Label done_element(this, &to_);
// a. Let Pk be ToString(k).
// We never have to perform a ToString conversion as the above guards
// guarantee that we have a positive {k} which also is a valid array
// index in the range [0, 2^32-1).
CSA_ASSERT(this, IsNumberArrayIndex(k()));
// k() is guaranteed to be a positive integer, hence ToString is
// side-effect free and HasProperty/GetProperty do the conversion inline.
CSA_ASSERT(this, IsSafeInteger(k()));
if (missing_property_mode == MissingPropertyMode::kSkip) {
// b. Let kPresent be HasProperty(O, Pk).
@ -902,9 +915,9 @@ Node* ArrayBuiltinsAssembler::FindProcessor(Node* k_value, Node* k) {
TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
TNode<Object> receiver = args.GetReceiver();
@ -953,7 +966,7 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
TNode<Int32T> elements_kind = LoadElementsKind(array_receiver);
GotoIf(Int32LessThanOrEqual(elements_kind,
Int32Constant(TERMINAL_FAST_ELEMENTS_KIND)),
&fast_elements);
@ -994,10 +1007,12 @@ TF_BUILTIN(ArrayPrototypePop, CodeStubAssembler) {
BIND(&runtime);
{
Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
MachineType::TaggedPointer());
TailCallStub(CodeFactory::ArrayPop(isolate()), context, target,
UndefinedConstant(), argc);
// We are not using Parameter(Descriptor::kJSTarget) and loading the value
// from the current frame here in order to reduce register pressure on the
// fast path.
TNode<JSFunction> target = LoadTargetFromFrame();
TailCallBuiltin(Builtins::kArrayPop, context, target, UndefinedConstant(),
argc);
}
}
@ -1014,9 +1029,9 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
// TODO(ishell): use constants from Descriptor once the JSFunction linkage
// arguments are reordered.
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
TNode<Object> receiver = args.GetReceiver();
@ -1126,10 +1141,12 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) {
BIND(&runtime);
{
Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
MachineType::TaggedPointer());
TailCallStub(CodeFactory::ArrayPush(isolate()), context, target,
UndefinedConstant(), argc);
// We are not using Parameter(Descriptor::kJSTarget) and loading the value
// from the current frame here in order to reduce register pressure on the
// fast path.
TNode<JSFunction> target = LoadTargetFromFrame();
TailCallBuiltin(Builtins::kArrayPush, context, target, UndefinedConstant(),
argc);
}
}
@ -1170,8 +1187,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
CSA_ASSERT(this, SmiGreaterThanOrEqual(CAST(from), SmiConstant(0)));
result.Bind(CallStub(CodeFactory::ExtractFastJSArray(isolate()), context,
array, from, count));
result.Bind(CallBuiltin(Builtins::kExtractFastJSArray, context, array, from,
count));
Goto(&done);
BIND(&try_fast_arguments);
@ -1297,8 +1314,8 @@ class ArrayPrototypeSliceCodeStubAssembler : public CodeStubAssembler {
TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
Node* const argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Label slow(this, Label::kDeferred), fast_elements_kind(this);
CodeStubArguments args(this, argc);
@ -1326,7 +1343,7 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
BIND(&clone);
args.PopAndReturn(
CallStub(CodeFactory::CloneFastJSArray(isolate()), context, receiver));
CallBuiltin(Builtins::kCloneFastJSArray, context, receiver));
BIND(&check_arguments_length);
@ -1472,9 +1489,9 @@ TF_BUILTIN(ArrayPrototypeSlice, ArrayPrototypeSliceCodeStubAssembler) {
TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(BuiltinDescriptor::kNewTarget)));
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CSA_ASSERT(this, IsUndefined(Parameter(Descriptor::kJSNewTarget)));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
TNode<Object> receiver = args.GetReceiver();
@ -1531,7 +1548,7 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
StoreObjectFieldNoWriteBarrier(array_receiver, JSArray::kLengthOffset,
SmiTag(new_length));
Node* elements_kind = LoadMapElementsKind(LoadMap(array_receiver));
TNode<Int32T> elements_kind = LoadElementsKind(array_receiver);
GotoIf(
Int32LessThanOrEqual(elements_kind, Int32Constant(HOLEY_SMI_ELEMENTS)),
&fast_elements_smi);
@ -1623,10 +1640,12 @@ TF_BUILTIN(ArrayPrototypeShift, CodeStubAssembler) {
BIND(&runtime);
{
Node* target = LoadFromFrame(StandardFrameConstants::kFunctionOffset,
MachineType::TaggedPointer());
TailCallStub(CodeFactory::ArrayShift(isolate()), context, target,
UndefinedConstant(), argc);
// We are not using Parameter(Descriptor::kJSTarget) and loading the value
// from the current frame here in order to reduce register pressure on the
// fast path.
TNode<JSFunction> target = LoadTargetFromFrame();
TailCallBuiltin(Builtins::kArrayShift, context, target, UndefinedConstant(),
argc);
}
}
@ -1736,9 +1755,9 @@ TF_BUILTIN(ArrayFindLoopAfterCallbackLazyDeoptContinuation,
// ES #sec-get-%typedarray%.prototype.find
TF_BUILTIN(ArrayPrototypeFind, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -1828,9 +1847,9 @@ TF_BUILTIN(ArrayFindIndexLoopAfterCallbackLazyDeoptContinuation,
// ES #sec-get-%typedarray%.prototype.findIndex
TF_BUILTIN(ArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -1988,9 +2007,9 @@ class ArrayPopulatorAssembler : public CodeStubAssembler {
// ES #sec-array.from
TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
@ -2182,10 +2201,10 @@ TF_BUILTIN(ArrayFrom, ArrayPopulatorAssembler) {
// ES #sec-array.of
TF_BUILTIN(ArrayOf, ArrayPopulatorAssembler) {
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(BuiltinDescriptor::kArgumentsCount));
UncheckedCast<Int32T>(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Smi> length = SmiFromInt32(argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
CodeStubArguments args(this, length, nullptr, ParameterMode::SMI_PARAMETERS);
@ -2208,9 +2227,9 @@ TF_BUILTIN(ArrayOf, ArrayPopulatorAssembler) {
// ES #sec-get-%typedarray%.prototype.find
TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -2227,9 +2246,9 @@ TF_BUILTIN(TypedArrayPrototypeFind, ArrayBuiltinsAssembler) {
// ES #sec-get-%typedarray%.prototype.findIndex
TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -2245,9 +2264,9 @@ TF_BUILTIN(TypedArrayPrototypeFindIndex, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeForEach, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -2325,9 +2344,9 @@ TF_BUILTIN(ArraySomeLoopContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArraySome, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -2344,9 +2363,9 @@ TF_BUILTIN(ArraySome, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeSome, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -2424,9 +2443,9 @@ TF_BUILTIN(ArrayEveryLoopContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArrayEvery, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -2443,9 +2462,9 @@ TF_BUILTIN(ArrayEvery, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeEvery, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -2522,9 +2541,9 @@ TF_BUILTIN(ArrayReduceLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@ -2542,9 +2561,9 @@ TF_BUILTIN(ArrayReduce, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeReduce, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@ -2624,9 +2643,9 @@ TF_BUILTIN(ArrayReduceRightLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@ -2646,9 +2665,9 @@ TF_BUILTIN(ArrayReduceRight, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeReduceRight, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* initial_value = args.GetOptionalArgumentValue(1, TheHoleConstant());
@ -2742,9 +2761,9 @@ TF_BUILTIN(ArrayFilterLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArrayFilter, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -2820,9 +2839,9 @@ TF_BUILTIN(ArrayMapLoopLazyDeoptContinuation, ArrayBuiltinsAssembler) {
TF_BUILTIN(ArrayMap, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -2839,9 +2858,9 @@ TF_BUILTIN(ArrayMap, ArrayBuiltinsAssembler) {
TF_BUILTIN(TypedArrayPrototypeMap, ArrayBuiltinsAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<Object> receiver = args.GetReceiver();
Node* callbackfn = args.GetOptionalArgumentValue(0);
Node* this_arg = args.GetOptionalArgumentValue(1);
@ -2887,7 +2906,8 @@ class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
enum SearchVariant { kIncludes, kIndexOf };
void Generate(SearchVariant variant);
void Generate(SearchVariant variant, TNode<IntPtrT> argc,
TNode<Context> context);
void GenerateSmiOrObject(SearchVariant variant, Node* context, Node* elements,
Node* search_element, Node* array_length,
Node* from_index);
@ -2899,18 +2919,17 @@ class ArrayIncludesIndexofAssembler : public CodeStubAssembler {
Node* from_index);
};
void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
TNode<IntPtrT> argc,
TNode<Context> context) {
const int kSearchElementArg = 0;
const int kFromIndexArg = 1;
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
CodeStubArguments args(this, argc);
TNode<Object> receiver = args.GetReceiver();
TNode<Object> search_element =
args.GetOptionalArgumentValue(kSearchElementArg);
TNode<Context> context = CAST(Parameter(BuiltinDescriptor::kContext));
Node* intptr_zero = IntPtrConstant(0);
@@ -2970,7 +2989,7 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant) {
Label if_smiorobjects(this), if_packed_doubles(this), if_holey_doubles(this);
Node* elements_kind = LoadMapElementsKind(LoadMap(array));
TNode<Int32T> elements_kind = LoadElementsKind(array);
Node* elements = LoadElements(array);
STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
@@ -3375,7 +3394,11 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles(SearchVariant variant,
}
TF_BUILTIN(ArrayIncludes, ArrayIncludesIndexofAssembler) {
Generate(kIncludes);
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kIncludes, argc, context);
}
TF_BUILTIN(ArrayIncludesSmiOrObject, ArrayIncludesIndexofAssembler) {
@@ -3409,7 +3432,13 @@ TF_BUILTIN(ArrayIncludesHoleyDoubles, ArrayIncludesIndexofAssembler) {
from_index);
}
TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) { Generate(kIndexOf); }
TF_BUILTIN(ArrayIndexOf, ArrayIncludesIndexofAssembler) {
TNode<IntPtrT> argc =
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
Generate(kIndexOf, argc, context);
}
TF_BUILTIN(ArrayIndexOfSmiOrObject, ArrayIncludesIndexofAssembler) {
Node* context = Parameter(Descriptor::kContext);
@@ -3627,8 +3656,7 @@ TF_BUILTIN(ArrayIteratorPrototypeNext, CodeStubAssembler) {
Node* buffer = LoadObjectField(array, JSTypedArray::kBufferOffset);
GotoIf(IsDetachedBuffer(buffer), &if_detached);
TNode<Smi> length =
CAST(LoadObjectField(array, JSTypedArray::kLengthOffset));
TNode<Smi> length = LoadTypedArrayLength(CAST(array));
GotoIfNot(SmiBelow(CAST(index), length), &set_done);
@@ -3921,12 +3949,12 @@ TF_BUILTIN(FlatMapIntoArray, ArrayFlattenAssembler) {
mapper_function, this_arg));
}
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatten
TF_BUILTIN(ArrayPrototypeFlatten, CodeStubAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flat
TF_BUILTIN(ArrayPrototypeFlat, CodeStubAssembler) {
Node* const argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* const context = Parameter(BuiltinDescriptor::kContext);
Node* const context = Parameter(Descriptor::kContext);
Node* const receiver = args.GetReceiver();
Node* const depth = args.GetOptionalArgumentValue(0);
@@ -3967,9 +3995,9 @@ TF_BUILTIN(ArrayPrototypeFlatten, CodeStubAssembler) {
// https://tc39.github.io/proposal-flatMap/#sec-Array.prototype.flatMap
TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
Node* const argc =
ChangeInt32ToIntPtr(Parameter(BuiltinDescriptor::kArgumentsCount));
ChangeInt32ToIntPtr(Parameter(Descriptor::kJSActualArgumentsCount));
CodeStubArguments args(this, argc);
Node* const context = Parameter(BuiltinDescriptor::kContext);
Node* const context = Parameter(Descriptor::kContext);
Node* const receiver = args.GetReceiver();
Node* const mapper_function = args.GetOptionalArgumentValue(0);
@@ -4005,5 +4033,405 @@ TF_BUILTIN(ArrayPrototypeFlatMap, CodeStubAssembler) {
{ ThrowTypeError(context, MessageTemplate::kMapperFunctionNonCallable); }
}
TF_BUILTIN(ArrayConstructor, ArrayBuiltinsAssembler) {
// This is a trampoline to ArrayConstructorImpl which just adds the
// allocation_site parameter value and sets new_target if necessary.
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<JSFunction> function = CAST(Parameter(Descriptor::kTarget));
TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
// If new_target is undefined, then this is the 'Call' case, so set new_target
// to function.
new_target =
SelectConstant<Object>(IsUndefined(new_target), function, new_target);
// Run the native code for the Array function called as a normal function.
TNode<Object> no_allocation_site = UndefinedConstant();
TailCallBuiltin(Builtins::kArrayConstructorImpl, context, function,
new_target, argc, no_allocation_site);
}
void ArrayBuiltinsAssembler::TailCallArrayConstructorStub(
const Callable& callable, TNode<Context> context, TNode<JSFunction> target,
TNode<HeapObject> allocation_site_or_undefined, TNode<Int32T> argc) {
TNode<Code> code = HeapConstant(callable.code());
// We are about to call either ArrayNoArgumentsConstructor or
// ArraySingleArgumentsConstructor, which, in addition to the register
// arguments, also expect some number of arguments on the expression stack.
// Since
// 1) the incoming JS arguments are still on the stack, and
// 2) ArrayNoArgumentsConstructor, ArraySingleArgumentsConstructor and
// ArrayNArgumentsConstructor are defined so that the register arguments
// are passed in the same registers,
// we can generate a tail call to those builtins with the following trick: we
// tail call to the constructor builtin using
// ArrayNArgumentsConstructorDescriptor, so the tail call instruction pops the
// current frame but leaves all the incoming JS arguments on the expression
// stack, where the target builtin can still find them.
TailCallStub(ArrayNArgumentsConstructorDescriptor{}, code, context, target,
allocation_site_or_undefined, argc);
}
void ArrayBuiltinsAssembler::CreateArrayDispatchNoArgument(
TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
AllocationSiteOverrideMode mode, TNode<AllocationSite> allocation_site) {
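// Tail call the zero-argument Array constructor stub that matches either the
// initial elements kind (when allocation sites are disabled) or the elements
// kind recorded on the allocation site.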
if (mode == DISABLE_ALLOCATION_SITES) {
Callable callable = CodeFactory::ArrayNoArgumentConstructor(
isolate(), GetInitialFastElementsKind(), mode);
TailCallArrayConstructorStub(callable, context, target, UndefinedConstant(),
argc);
} else {
DCHECK_EQ(mode, DONT_OVERRIDE);
TNode<Int32T> elements_kind = LoadElementsKind(allocation_site);
// TODO(ishell): Compute the builtin index dynamically instead of
// iterating over all expected elements kinds.
int last_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next(this);
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
GotoIfNot(Word32Equal(elements_kind, Int32Constant(kind)), &next);
Callable callable =
CodeFactory::ArrayNoArgumentConstructor(isolate(), kind, mode);
TailCallArrayConstructorStub(callable, context, target, allocation_site,
argc);
BIND(&next);
}
// If we reached this point there is a problem.
Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
}
}
void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument(
TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
AllocationSiteOverrideMode mode, TNode<AllocationSite> allocation_site) {
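// Same as above, but for the single-argument constructor stubs; the elements
// kind is forced to a holey variant before dispatching.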
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
ElementsKind holey_initial = GetHoleyElementsKind(initial);
Callable callable = CodeFactory::ArraySingleArgumentConstructor(
isolate(), holey_initial, mode);
TailCallArrayConstructorStub(callable, context, target, UndefinedConstant(),
argc);
} else {
DCHECK_EQ(mode, DONT_OVERRIDE);
TNode<Smi> transition_info = LoadTransitionInfo(allocation_site);
// The least significant bit in a fast array elements kind indicates holeyness.
STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(PACKED_ELEMENTS == 2);
STATIC_ASSERT(HOLEY_ELEMENTS == 3);
STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
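// Consequently, OR'ing in the low bit turns a packed kind into its holey
// counterpart, e.g. PACKED_ELEMENTS (2) becomes HOLEY_ELEMENTS (3).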
Label normal_sequence(this);
TVARIABLE(Int32T, var_elements_kind,
Signed(DecodeWord32<AllocationSite::ElementsKindBits>(
SmiToInt32(transition_info))));
// Is the low bit set? If so, we are holey and that is good.
int fast_elements_kind_holey_mask =
AllocationSite::ElementsKindBits::encode(static_cast<ElementsKind>(1));
GotoIf(IsSetSmi(transition_info, fast_elements_kind_holey_mask),
&normal_sequence);
{
// Make elements kind holey and update elements kind in the type info.
var_elements_kind =
Signed(Word32Or(var_elements_kind.value(), Int32Constant(1)));
StoreObjectFieldNoWriteBarrier(
allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset,
SmiOr(transition_info, SmiConstant(fast_elements_kind_holey_mask)));
Goto(&normal_sequence);
}
BIND(&normal_sequence);
// TODO(ishell): Compute the builtin index dynamically instead of
// iterating over all expected elements kinds.
// TODO(ishell): Given that the code above ensures that the elements kind
// is holey we can skip checking with non-holey elements kinds.
int last_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
Label next(this);
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
GotoIfNot(Word32Equal(var_elements_kind.value(), Int32Constant(kind)),
&next);
Callable callable =
CodeFactory::ArraySingleArgumentConstructor(isolate(), kind, mode);
TailCallArrayConstructorStub(callable, context, target, allocation_site,
argc);
BIND(&next);
}
// If we reached this point there is a problem.
Abort(AbortReason::kUnexpectedElementsKindInArrayConstructor);
}
}
void ArrayBuiltinsAssembler::GenerateDispatchToArrayStub(
TNode<Context> context, TNode<JSFunction> target, TNode<Int32T> argc,
AllocationSiteOverrideMode mode, TNode<AllocationSite> allocation_site) {
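// Dispatch on the argument count: zero and one argument(s) have specialized
// constructor stubs; any other count falls through so the caller can handle
// the generic case.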
Label check_one_case(this), fallthrough(this);
GotoIfNot(Word32Equal(argc, Int32Constant(0)), &check_one_case);
CreateArrayDispatchNoArgument(context, target, argc, mode, allocation_site);
BIND(&check_one_case);
GotoIfNot(Word32Equal(argc, Int32Constant(1)), &fallthrough);
CreateArrayDispatchSingleArgument(context, target, argc, mode,
allocation_site);
BIND(&fallthrough);
}
TF_BUILTIN(ArrayConstructorImpl, ArrayBuiltinsAssembler) {
TNode<JSFunction> target = CAST(Parameter(Descriptor::kTarget));
TNode<Object> new_target = CAST(Parameter(Descriptor::kNewTarget));
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
TNode<HeapObject> maybe_allocation_site =
CAST(Parameter(Descriptor::kAllocationSite));
// The initial map for the builtin Array function should be a Map.
CSA_ASSERT(this, IsMap(CAST(LoadObjectField(
target, JSFunction::kPrototypeOrInitialMapOffset))));
// We should either have undefined or a valid AllocationSite
CSA_ASSERT(this, Word32Or(IsUndefined(maybe_allocation_site),
IsAllocationSite(maybe_allocation_site)));
// "Enter" the context of the Array function.
TNode<Context> context =
CAST(LoadObjectField(target, JSFunction::kContextOffset));
Label runtime(this, Label::kDeferred);
GotoIf(WordNotEqual(target, new_target), &runtime);
Label no_info(this);
// If maybe_allocation_site is undefined, call an array constructor that
// doesn't use AllocationSites.
GotoIf(IsUndefined(maybe_allocation_site), &no_info);
GenerateDispatchToArrayStub(context, target, argc, DONT_OVERRIDE,
CAST(maybe_allocation_site));
Goto(&runtime);
BIND(&no_info);
GenerateDispatchToArrayStub(context, target, argc, DISABLE_ALLOCATION_SITES);
Goto(&runtime);
BIND(&runtime);
GenerateArrayNArgumentsConstructor(context, target, new_target, argc,
maybe_allocation_site);
}
void ArrayBuiltinsAssembler::GenerateConstructor(
Node* context, Node* array_function, Node* array_map, Node* array_size,
Node* allocation_site, ElementsKind elements_kind,
AllocationSiteMode mode) {
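// Allocate the array inline when the requested size is a Smi small enough to
// fit in a regular heap object; otherwise tail call into Runtime::kNewArray.
// Packed elements kinds only support a size of zero here; anything else
// aborts.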
Label ok(this);
Label smi_size(this);
Label small_smi_size(this);
Label call_runtime(this, Label::kDeferred);
Branch(TaggedIsSmi(array_size), &smi_size, &call_runtime);
BIND(&smi_size);
if (IsFastPackedElementsKind(elements_kind)) {
Label abort(this, Label::kDeferred);
Branch(SmiEqual(CAST(array_size), SmiConstant(0)), &small_smi_size, &abort);
BIND(&abort);
Node* reason = SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray);
TailCallRuntime(Runtime::kAbort, context, reason);
} else {
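// Compute the largest number of elements whose backing store still fits into
// a regular heap object, leaving room for the JSArray, the FixedArray header
// and a possible AllocationMemento.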
int element_size =
IsDoubleElementsKind(elements_kind) ? kDoubleSize : kPointerSize;
int max_fast_elements =
(kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - JSArray::kSize -
AllocationMemento::kSize) /
element_size;
Branch(SmiAboveOrEqual(CAST(array_size), SmiConstant(max_fast_elements)),
&call_runtime, &small_smi_size);
}
BIND(&small_smi_size);
{
Node* array = AllocateJSArray(
elements_kind, array_map, array_size, array_size,
mode == DONT_TRACK_ALLOCATION_SITE ? nullptr : allocation_site,
CodeStubAssembler::SMI_PARAMETERS);
Return(array);
}
BIND(&call_runtime);
{
TailCallRuntime(Runtime::kNewArray, context, array_function, array_size,
array_function, allocation_site);
}
}
void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor(
ElementsKind kind, AllocationSiteOverrideMode mode) {
typedef ArrayNoArgumentConstructorDescriptor Descriptor;
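// The context of the builtin Array function is the native context, from which
// the JSArray elements map for |kind| can be loaded.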
Node* native_context = LoadObjectField(Parameter(Descriptor::kFunction),
JSFunction::kContextOffset);
bool track_allocation_site =
AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES;
Node* allocation_site =
track_allocation_site ? Parameter(Descriptor::kAllocationSite) : nullptr;
Node* array_map = LoadJSArrayElementsMap(kind, native_context);
Node* array = AllocateJSArray(
kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements),
SmiConstant(0), allocation_site);
Return(array);
}
void ArrayBuiltinsAssembler::GenerateArraySingleArgumentConstructor(
ElementsKind kind, AllocationSiteOverrideMode mode) {
typedef ArraySingleArgumentConstructorDescriptor Descriptor;
Node* context = Parameter(Descriptor::kContext);
Node* function = Parameter(Descriptor::kFunction);
Node* native_context = LoadObjectField(function, JSFunction::kContextOffset);
Node* array_map = LoadJSArrayElementsMap(kind, native_context);
AllocationSiteMode allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
if (mode == DONT_OVERRIDE) {
allocation_site_mode = AllocationSite::ShouldTrack(kind)
? TRACK_ALLOCATION_SITE
: DONT_TRACK_ALLOCATION_SITE;
}
Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter);
Node* allocation_site = Parameter(Descriptor::kAllocationSite);
GenerateConstructor(context, function, array_map, array_size, allocation_site,
kind, allocation_site_mode);
}
void ArrayBuiltinsAssembler::GenerateArrayNArgumentsConstructor(
TNode<Context> context, TNode<JSFunction> target, TNode<Object> new_target,
TNode<Int32T> argc, TNode<HeapObject> maybe_allocation_site) {
// Replace incoming JS receiver argument with the target.
// TODO(ishell): Avoid replacing the target on the stack and just add it
// as another additional parameter for Runtime::kNewArray.
CodeStubArguments args(this, ChangeInt32ToIntPtr(argc));
args.SetReceiver(target);
// Adjust arguments count for the runtime call: +1 for implicit receiver
// and +2 for new_target and maybe_allocation_site.
argc = Int32Add(argc, Int32Constant(3));
TailCallRuntime(Runtime::kNewArray, argc, context, new_target,
maybe_allocation_site);
}
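// Generic N-arguments Array constructor builtin; it forwards all arguments to
// Runtime::kNewArray via GenerateArrayNArgumentsConstructor.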
TF_BUILTIN(ArrayNArgumentsConstructor, ArrayBuiltinsAssembler) {
TNode<Context> context = CAST(Parameter(Descriptor::kContext));
TNode<JSFunction> target = CAST(Parameter(Descriptor::kFunction));
TNode<Int32T> argc =
UncheckedCast<Int32T>(Parameter(Descriptor::kActualArgumentsCount));
TNode<HeapObject> maybe_allocation_site =
CAST(Parameter(Descriptor::kAllocationSite));
GenerateArrayNArgumentsConstructor(context, target, target, argc,
maybe_allocation_site);
}
void ArrayBuiltinsAssembler::GenerateInternalArrayNoArgumentConstructor(
ElementsKind kind) {
typedef ArrayNoArgumentConstructorDescriptor Descriptor;
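// The function's initial map is used directly as the map of the new array.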
Node* array_map = LoadObjectField(Parameter(Descriptor::kFunction),
JSFunction::kPrototypeOrInitialMapOffset);
Node* array = AllocateJSArray(
kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements),
SmiConstant(0));
Return(array);
}
void ArrayBuiltinsAssembler::GenerateInternalArraySingleArgumentConstructor(
ElementsKind kind) {
typedef ArraySingleArgumentConstructorDescriptor Descriptor;
Node* context = Parameter(Descriptor::kContext);
Node* function = Parameter(Descriptor::kFunction);
Node* array_map =
LoadObjectField(function, JSFunction::kPrototypeOrInitialMapOffset);
Node* array_size = Parameter(Descriptor::kArraySizeSmiParameter);
Node* allocation_site = UndefinedConstant();
GenerateConstructor(context, function, array_map, array_size, allocation_site,
kind, DONT_TRACK_ALLOCATION_SITE);
}
#define GENERATE_ARRAY_CTOR(name, kind_camel, kind_caps, mode_camel, \
mode_caps) \
TF_BUILTIN(Array##name##Constructor_##kind_camel##_##mode_camel, \
ArrayBuiltinsAssembler) { \
GenerateArray##name##Constructor(kind_caps, mode_caps); \
}
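// The macro expands to builtins named, e.g.,
// ArrayNoArgumentConstructor_PackedSmi_DontOverride.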
// The ArrayNoArgumentConstructor builtin family.
GENERATE_ARRAY_CTOR(NoArgument, PackedSmi, PACKED_SMI_ELEMENTS, DontOverride,
DONT_OVERRIDE);
GENERATE_ARRAY_CTOR(NoArgument, HoleySmi, HOLEY_SMI_ELEMENTS, DontOverride,
DONT_OVERRIDE);
GENERATE_ARRAY_CTOR(NoArgument, PackedSmi, PACKED_SMI_ELEMENTS,
DisableAllocationSites, DISABLE_ALLOCATION_SITES);
GENERATE_ARRAY_CTOR(NoArgument, HoleySmi, HOLEY_SMI_ELEMENTS,
DisableAllocationSites, DISABLE_ALLOCATION_SITES);
GENERATE_ARRAY_CTOR(NoArgument, Packed, PACKED_ELEMENTS, DisableAllocationSites,
DISABLE_ALLOCATION_SITES);
GENERATE_ARRAY_CTOR(NoArgument, Holey, HOLEY_ELEMENTS, DisableAllocationSites,
DISABLE_ALLOCATION_SITES);
GENERATE_ARRAY_CTOR(NoArgument, PackedDouble, PACKED_DOUBLE_ELEMENTS,
DisableAllocationSites, DISABLE_ALLOCATION_SITES);
GENERATE_ARRAY_CTOR(NoArgument, HoleyDouble, HOLEY_DOUBLE_ELEMENTS,
DisableAllocationSites, DISABLE_ALLOCATION_SITES);
// The ArraySingleArgumentConstructor builtin family.
GENERATE_ARRAY_CTOR(SingleArgument, PackedSmi, PACKED_SMI_ELEMENTS,
DontOverride, DONT_OVERRIDE);
GENERATE_ARRAY_CTOR(SingleArgument, HoleySmi, HOLEY_SMI_ELEMENTS, DontOverride,
DONT_OVERRIDE);
GENERATE_ARRAY_CTOR(SingleArgument, PackedSmi, PACKED_SMI_ELEMENTS,
DisableAllocationSites, DISABLE_ALLOCATION_SITES);
GENERATE_ARRAY_CTOR(SingleArgument, HoleySmi, HOLEY_SMI_ELEMENTS,
DisableAllocationSites, DISABLE_ALLOCATION_SITES);
GENERATE_ARRAY_CTOR(SingleArgument, Packed, PACKED_ELEMENTS,
DisableAllocationSites, DISABLE_ALLOCATION_SITES);
GENERATE_ARRAY_CTOR(SingleArgument, Holey, HOLEY_ELEMENTS,
DisableAllocationSites, DISABLE_ALLOCATION_SITES);
GENERATE_ARRAY_CTOR(SingleArgument, PackedDouble, PACKED_DOUBLE_ELEMENTS,
DisableAllocationSites, DISABLE_ALLOCATION_SITES);
GENERATE_ARRAY_CTOR(SingleArgument, HoleyDouble, HOLEY_DOUBLE_ELEMENTS,
DisableAllocationSites, DISABLE_ALLOCATION_SITES);
#undef GENERATE_ARRAY_CTOR
#define GENERATE_INTERNAL_ARRAY_CTOR(name, kind_camel, kind_caps) \
TF_BUILTIN(InternalArray##name##Constructor_##kind_camel, \
ArrayBuiltinsAssembler) { \
GenerateInternalArray##name##Constructor(kind_caps); \
}
GENERATE_INTERNAL_ARRAY_CTOR(NoArgument, Packed, PACKED_ELEMENTS);
GENERATE_INTERNAL_ARRAY_CTOR(NoArgument, Holey, HOLEY_ELEMENTS);
GENERATE_INTERNAL_ARRAY_CTOR(SingleArgument, Packed, PACKED_ELEMENTS);
GENERATE_INTERNAL_ARRAY_CTOR(SingleArgument, Holey, HOLEY_ELEMENTS);
#undef GENERATE_INTERNAL_ARRAY_CTOR
} // namespace internal
} // namespace v8
