deps: update V8 to 6.1.534.36

PR-URL: https://github.com/nodejs/node/pull/14730
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Michaël Zasso 2017-09-12 11:34:59 +02:00 committed by Anna Henningsen
parent b4b7ac6ae8
commit d82e1075db
GPG Key ID: 9C63F3A6CD2AD8F9
2007 changed files with 131172 additions and 261371 deletions

3
deps/v8/.gitignore vendored

@ -35,6 +35,7 @@
/_*
/build
/buildtools
/gypfiles/.gold_plugin
/gypfiles/win_toolchain.json
/hydrogen.cfg
/obj
@ -47,8 +48,6 @@
/test/benchmarks/data
/test/fuzzer/wasm_corpus
/test/fuzzer/wasm_corpus.tar.gz
/test/fuzzer/wasm_asmjs_corpus
/test/fuzzer/wasm_asmjs_corpus.tar.gz
/test/mozilla/data
/test/promises-aplus/promises-tests
/test/promises-aplus/promises-tests.tar.gz

3
deps/v8/AUTHORS vendored

@ -20,7 +20,7 @@ Imagination Technologies, LLC <*@imgtec.com>
Loongson Technology Corporation Limited <*@loongson.cn>
Code Aurora Forum <*@codeaurora.org>
Home Jinni Inc. <*@homejinni.com>
IBM Inc. <*@*.ibm.com>
IBM Inc. <*@*ibm.com>
Samsung <*@*.samsung.com>
Joyent, Inc <*@joyent.com>
RT-RK Computer Based System <*@rt-rk.com>
@ -126,6 +126,7 @@ Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
Yu Yin <xwafish@gmail.com>
Zac Hansen <xaxxon@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>

315
deps/v8/BUILD.gn vendored

@ -24,9 +24,6 @@ declare_args() {
# Sets -DV8_ENABLE_FUTURE.
v8_enable_future = false
# Sets -DV8_DISABLE_TURBO.
v8_disable_turbo = false
# Sets -DVERIFY_HEAP.
v8_enable_verify_heap = ""
@ -82,6 +79,10 @@ declare_args() {
# Sets -DV8_CONCURRENT_MARKING
v8_enable_concurrent_marking = false
# Build the snapshot with unwinding information for perf.
# Sets -DV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
v8_perf_prof_unwinding_info = false
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
@ -111,9 +112,13 @@ declare_args() {
v8_experimental_extra_library_files =
[ "//test/cctest/test-experimental-extra.js" ]
v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
v8_current_cpu == "x87") && (is_linux || is_mac)) ||
(v8_current_cpu == "ppc64" && is_linux)
v8_enable_gdbjit =
((v8_current_cpu == "x86" || v8_current_cpu == "x64") &&
(is_linux || is_mac)) || (v8_current_cpu == "ppc64" && is_linux)
# Temporary flag to allow embedders to update their microtasks scopes
# while rolling in a new version of V8.
v8_check_microtasks_scopes_consistency = ""
}
# Derived defaults.
@ -132,6 +137,9 @@ if (v8_enable_trace_maps == "") {
if (v8_enable_v8_checks == "") {
v8_enable_v8_checks = is_debug
}
if (v8_check_microtasks_scopes_consistency == "") {
v8_check_microtasks_scopes_consistency = is_debug || dcheck_always_on
}
# Specifies if the target build is a simulator build. Comparing target cpu
# with v8 target cpu to not affect simulator builds for making cross-compile
@ -219,9 +227,6 @@ config("features") {
if (v8_enable_future) {
defines += [ "V8_ENABLE_FUTURE" ]
}
if (v8_disable_turbo) {
defines += [ "V8_DISABLE_TURBO" ]
}
if (v8_enable_gdbjit) {
defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
}
@ -263,6 +268,9 @@ config("features") {
}
if (v8_use_snapshot) {
defines += [ "V8_USE_SNAPSHOT" ]
if (v8_perf_prof_unwinding_info) {
defines += [ "V8_USE_SNAPSHOT_WITH_UNWINDING_INFO" ]
}
}
if (v8_use_external_startup_data) {
defines += [ "V8_USE_EXTERNAL_STARTUP_DATA" ]
@ -270,6 +278,9 @@ config("features") {
if (v8_enable_concurrent_marking) {
defines += [ "V8_CONCURRENT_MARKING" ]
}
if (v8_check_microtasks_scopes_consistency) {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
}
config("toolchain") {
@ -320,8 +331,7 @@ config("toolchain") {
defines += [ "_MIPS_TARGET_SIMULATOR" ]
}
# TODO(jochen): Add support for mips.
if (v8_current_cpu == "mipsel") {
if (v8_current_cpu == "mipsel" || v8_current_cpu == "mips") {
defines += [ "V8_TARGET_ARCH_MIPS" ]
if (v8_can_use_fpu_instructions) {
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
@ -355,15 +365,17 @@ config("toolchain") {
# TODO(jochen): Add support for mips_arch_variant rx and loongson.
}
# TODO(jochen): Add support for mips64.
if (v8_current_cpu == "mips64el") {
if (v8_current_cpu == "mips64el" || v8_current_cpu == "mips64") {
defines += [ "V8_TARGET_ARCH_MIPS64" ]
if (v8_can_use_fpu_instructions) {
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
}
# TODO(jochen): Add support for big endian host byteorder.
defines += [ "V8_TARGET_ARCH_MIPS64_LE" ]
if (host_byteorder == "little") {
defines += [ "V8_TARGET_ARCH_MIPS64_LE" ]
} else if (host_byteorder == "big") {
defines += [ "V8_TARGET_ARCH_MIPS64_BE" ]
}
if (v8_use_mips_abi_hardfloat) {
defines += [
"__mips_hard_float=1",
@ -496,6 +508,19 @@ config("toolchain") {
}
}
# Configs for code coverage with gcov. Separate configs for cflags and ldflags
# to selectively include cflags in non-test targets only.
config("v8_gcov_coverage_cflags") {
cflags = [
"-fprofile-arcs",
"-ftest-coverage",
]
}
config("v8_gcov_coverage_ldflags") {
ldflags = [ "-fprofile-arcs" ]
}
###############################################################################
# Actions
#
@ -523,8 +548,6 @@ action("js2c") {
"src/js/typedarray.js",
"src/js/collection.js",
"src/js/weak-collection.js",
"src/js/collection-iterator.js",
"src/js/promise.js",
"src/js/messages.js",
"src/js/templates.js",
"src/js/spread.js",
@ -703,6 +726,12 @@ action("postmortem-metadata") {
"src/objects-inl.h",
"src/objects/map.h",
"src/objects/map-inl.h",
"src/objects/script.h",
"src/objects/script-inl.h",
"src/objects/shared-function-info.h",
"src/objects/shared-function-info-inl.h",
"src/objects/string.h",
"src/objects/string-inl.h",
]
outputs = [
@ -750,6 +779,10 @@ action("run_mksnapshot") {
]
}
if (v8_perf_prof_unwinding_info) {
args += [ "--perf-prof-unwinding-info" ]
}
if (v8_use_external_startup_data) {
outputs += [ "$root_out_dir/snapshot_blob.bin" ]
args += [
@ -769,6 +802,7 @@ action("v8_dump_build_config") {
outputs = [
"$root_out_dir/v8_build_config.json",
]
is_gcov_coverage = v8_code_coverage && !is_clang
args = [
rebase_path("$root_out_dir/v8_build_config.json", root_build_dir),
"current_cpu=\"$current_cpu\"",
@ -777,6 +811,7 @@ action("v8_dump_build_config") {
"is_cfi=$is_cfi",
"is_component_build=$is_component_build",
"is_debug=$is_debug",
"is_gcov_coverage=$is_gcov_coverage",
"is_msan=$is_msan",
"is_tsan=$is_tsan",
"target_cpu=\"$target_cpu\"",
@ -907,12 +942,16 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-async-iterator-gen.cc",
"src/builtins/builtins-boolean-gen.cc",
"src/builtins/builtins-call-gen.cc",
"src/builtins/builtins-call-gen.h",
"src/builtins/builtins-collections-gen.cc",
"src/builtins/builtins-console-gen.cc",
"src/builtins/builtins-constructor-gen.cc",
"src/builtins/builtins-constructor-gen.h",
"src/builtins/builtins-constructor.h",
"src/builtins/builtins-conversion-gen.cc",
"src/builtins/builtins-conversion-gen.h",
"src/builtins/builtins-date-gen.cc",
"src/builtins/builtins-debug-gen.cc",
"src/builtins/builtins-forin-gen.cc",
"src/builtins/builtins-forin-gen.h",
"src/builtins/builtins-function-gen.cc",
@ -923,11 +962,14 @@ v8_source_set("v8_builtins_generators") {
"src/builtins/builtins-internal-gen.cc",
"src/builtins/builtins-interpreter-gen.cc",
"src/builtins/builtins-intl-gen.cc",
"src/builtins/builtins-iterator-gen.cc",
"src/builtins/builtins-iterator-gen.h",
"src/builtins/builtins-math-gen.cc",
"src/builtins/builtins-number-gen.cc",
"src/builtins/builtins-object-gen.cc",
"src/builtins/builtins-promise-gen.cc",
"src/builtins/builtins-promise-gen.h",
"src/builtins/builtins-proxy-gen.cc",
"src/builtins/builtins-regexp-gen.cc",
"src/builtins/builtins-regexp-gen.h",
"src/builtins/builtins-sharedarraybuffer-gen.cc",
@ -994,11 +1036,6 @@ v8_source_set("v8_builtins_generators") {
### gcmole(arch:s390) ###
"src/builtins/s390/builtins-s390.cc",
]
} else if (v8_current_cpu == "x87") {
sources += [
### gcmole(arch:x87) ###
"src/builtins/x87/builtins-x87.cc",
]
}
if (!v8_enable_i18n_support) {
@ -1053,6 +1090,9 @@ v8_header_set("v8_headers") {
v8_source_set("v8_base") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
# Split static libraries on windows into two.
split_count = 2
sources = [
"//base/trace_event/common/trace_event_common.h",
@ -1070,7 +1110,6 @@ v8_source_set("v8_base") {
"src/accessors.h",
"src/address-map.cc",
"src/address-map.h",
"src/allocation-site-scopes.cc",
"src/allocation-site-scopes.h",
"src/allocation.cc",
"src/allocation.h",
@ -1105,10 +1144,8 @@ v8_source_set("v8_base") {
"src/ast/ast-function-literal-id-reindexer.h",
"src/ast/ast-numbering.cc",
"src/ast/ast-numbering.h",
"src/ast/ast-source-ranges.h",
"src/ast/ast-traversal-visitor.h",
"src/ast/ast-type-bounds.h",
"src/ast/ast-types.cc",
"src/ast/ast-types.h",
"src/ast/ast-value-factory.cc",
"src/ast/ast-value-factory.h",
"src/ast/ast.cc",
@ -1145,11 +1182,11 @@ v8_source_set("v8_base") {
"src/builtins/builtins-boolean.cc",
"src/builtins/builtins-call.cc",
"src/builtins/builtins-callsite.cc",
"src/builtins/builtins-collections.cc",
"src/builtins/builtins-console.cc",
"src/builtins/builtins-constructor.h",
"src/builtins/builtins-dataview.cc",
"src/builtins/builtins-date.cc",
"src/builtins/builtins-debug.cc",
"src/builtins/builtins-definitions.h",
"src/builtins/builtins-descriptors.h",
"src/builtins/builtins-error.cc",
@ -1158,11 +1195,12 @@ v8_source_set("v8_base") {
"src/builtins/builtins-internal.cc",
"src/builtins/builtins-interpreter.cc",
"src/builtins/builtins-intl.cc",
"src/builtins/builtins-intl.h",
"src/builtins/builtins-json.cc",
"src/builtins/builtins-math.cc",
"src/builtins/builtins-number.cc",
"src/builtins/builtins-object.cc",
"src/builtins/builtins-proxy.cc",
"src/builtins/builtins-promise.cc",
"src/builtins/builtins-reflect.cc",
"src/builtins/builtins-regexp.cc",
"src/builtins/builtins-sharedarraybuffer.cc",
@ -1186,7 +1224,6 @@ v8_source_set("v8_base") {
"src/code-factory.h",
"src/code-stub-assembler.cc",
"src/code-stub-assembler.h",
"src/code-stubs-hydrogen.cc",
"src/code-stubs-utils.h",
"src/code-stubs.cc",
"src/code-stubs.h",
@ -1232,6 +1269,8 @@ v8_source_set("v8_base") {
"src/compiler/bytecode-liveness-map.cc",
"src/compiler/bytecode-liveness-map.h",
"src/compiler/c-linkage.cc",
"src/compiler/check-elimination.cc",
"src/compiler/check-elimination.h",
"src/compiler/checkpoint-elimination.cc",
"src/compiler/checkpoint-elimination.h",
"src/compiler/code-assembler.cc",
@ -1324,8 +1363,6 @@ v8_source_set("v8_base") {
"src/compiler/linkage.h",
"src/compiler/live-range-separator.cc",
"src/compiler/live-range-separator.h",
"src/compiler/liveness-analyzer.cc",
"src/compiler/liveness-analyzer.h",
"src/compiler/load-elimination.cc",
"src/compiler/load-elimination.h",
"src/compiler/loop-analysis.cc",
@ -1369,6 +1406,8 @@ v8_source_set("v8_base") {
"src/compiler/pipeline-statistics.h",
"src/compiler/pipeline.cc",
"src/compiler/pipeline.h",
"src/compiler/property-access-builder.cc",
"src/compiler/property-access-builder.h",
"src/compiler/raw-machine-assembler.cc",
"src/compiler/raw-machine-assembler.h",
"src/compiler/redundancy-elimination.cc",
@ -1397,8 +1436,6 @@ v8_source_set("v8_base") {
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.cc",
"src/compiler/store-store-elimination.h",
"src/compiler/tail-call-optimization.cc",
"src/compiler/tail-call-optimization.h",
"src/compiler/type-cache.cc",
"src/compiler/type-cache.h",
"src/compiler/typed-optimization.cc",
@ -1426,67 +1463,6 @@ v8_source_set("v8_base") {
"src/counters-inl.h",
"src/counters.cc",
"src/counters.h",
"src/crankshaft/compilation-phase.cc",
"src/crankshaft/compilation-phase.h",
"src/crankshaft/hydrogen-alias-analysis.h",
"src/crankshaft/hydrogen-bce.cc",
"src/crankshaft/hydrogen-bce.h",
"src/crankshaft/hydrogen-canonicalize.cc",
"src/crankshaft/hydrogen-canonicalize.h",
"src/crankshaft/hydrogen-check-elimination.cc",
"src/crankshaft/hydrogen-check-elimination.h",
"src/crankshaft/hydrogen-dce.cc",
"src/crankshaft/hydrogen-dce.h",
"src/crankshaft/hydrogen-dehoist.cc",
"src/crankshaft/hydrogen-dehoist.h",
"src/crankshaft/hydrogen-environment-liveness.cc",
"src/crankshaft/hydrogen-environment-liveness.h",
"src/crankshaft/hydrogen-escape-analysis.cc",
"src/crankshaft/hydrogen-escape-analysis.h",
"src/crankshaft/hydrogen-flow-engine.h",
"src/crankshaft/hydrogen-gvn.cc",
"src/crankshaft/hydrogen-gvn.h",
"src/crankshaft/hydrogen-infer-representation.cc",
"src/crankshaft/hydrogen-infer-representation.h",
"src/crankshaft/hydrogen-infer-types.cc",
"src/crankshaft/hydrogen-infer-types.h",
"src/crankshaft/hydrogen-instructions.cc",
"src/crankshaft/hydrogen-instructions.h",
"src/crankshaft/hydrogen-load-elimination.cc",
"src/crankshaft/hydrogen-load-elimination.h",
"src/crankshaft/hydrogen-mark-unreachable.cc",
"src/crankshaft/hydrogen-mark-unreachable.h",
"src/crankshaft/hydrogen-osr.cc",
"src/crankshaft/hydrogen-osr.h",
"src/crankshaft/hydrogen-range-analysis.cc",
"src/crankshaft/hydrogen-range-analysis.h",
"src/crankshaft/hydrogen-redundant-phi.cc",
"src/crankshaft/hydrogen-redundant-phi.h",
"src/crankshaft/hydrogen-removable-simulates.cc",
"src/crankshaft/hydrogen-removable-simulates.h",
"src/crankshaft/hydrogen-representation-changes.cc",
"src/crankshaft/hydrogen-representation-changes.h",
"src/crankshaft/hydrogen-sce.cc",
"src/crankshaft/hydrogen-sce.h",
"src/crankshaft/hydrogen-store-elimination.cc",
"src/crankshaft/hydrogen-store-elimination.h",
"src/crankshaft/hydrogen-types.cc",
"src/crankshaft/hydrogen-types.h",
"src/crankshaft/hydrogen-uint32-analysis.cc",
"src/crankshaft/hydrogen-uint32-analysis.h",
"src/crankshaft/hydrogen.cc",
"src/crankshaft/hydrogen.h",
"src/crankshaft/lithium-allocator-inl.h",
"src/crankshaft/lithium-allocator.cc",
"src/crankshaft/lithium-allocator.h",
"src/crankshaft/lithium-codegen.cc",
"src/crankshaft/lithium-codegen.h",
"src/crankshaft/lithium-inl.h",
"src/crankshaft/lithium.cc",
"src/crankshaft/lithium.h",
"src/crankshaft/typing.cc",
"src/crankshaft/typing.h",
"src/crankshaft/unique.h",
"src/date.cc",
"src/date.h",
"src/dateparser-inl.h",
@ -1518,7 +1494,6 @@ v8_source_set("v8_base") {
"src/double.h",
"src/dtoa.cc",
"src/dtoa.h",
"src/effects.h",
"src/eh-frame.cc",
"src/eh-frame.h",
"src/elements-kind.cc",
@ -1560,6 +1535,7 @@ v8_source_set("v8_base") {
"src/flag-definitions.h",
"src/flags.cc",
"src/flags.h",
"src/float.h",
"src/frames-inl.h",
"src/frames.cc",
"src/frames.h",
@ -1581,7 +1557,6 @@ v8_source_set("v8_base") {
"src/heap/array-buffer-tracker.h",
"src/heap/code-stats.cc",
"src/heap/code-stats.h",
"src/heap/concurrent-marking-deque.h",
"src/heap/concurrent-marking.cc",
"src/heap/concurrent-marking.h",
"src/heap/embedder-tracing.cc",
@ -1599,9 +1574,11 @@ v8_source_set("v8_base") {
"src/heap/incremental-marking.cc",
"src/heap/incremental-marking.h",
"src/heap/item-parallel-job.h",
"src/heap/local-allocator.h",
"src/heap/mark-compact-inl.h",
"src/heap/mark-compact.cc",
"src/heap/mark-compact.h",
"src/heap/marking.cc",
"src/heap/marking.h",
"src/heap/memory-reducer.cc",
"src/heap/memory-reducer.h",
@ -1610,7 +1587,6 @@ v8_source_set("v8_base") {
"src/heap/objects-visiting-inl.h",
"src/heap/objects-visiting.cc",
"src/heap/objects-visiting.h",
"src/heap/page-parallel-job.h",
"src/heap/remembered-set.h",
"src/heap/scavenge-job.cc",
"src/heap/scavenge-job.h",
@ -1625,7 +1601,7 @@ v8_source_set("v8_base") {
"src/heap/spaces.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
"src/heap/workstealing-marking-deque.h",
"src/heap/worklist.h",
"src/ic/access-compiler-data.h",
"src/ic/access-compiler.cc",
"src/ic/access-compiler.h",
@ -1650,6 +1626,7 @@ v8_source_set("v8_base") {
"src/identity-map.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
"src/interpreter/block-coverage-builder.h",
"src/interpreter/bytecode-array-accessor.cc",
"src/interpreter/bytecode-array-accessor.h",
"src/interpreter/bytecode-array-builder.cc",
@ -1740,10 +1717,15 @@ v8_source_set("v8_base") {
"src/objects-printer.cc",
"src/objects.cc",
"src/objects.h",
"src/objects/arguments-inl.h",
"src/objects/arguments.h",
"src/objects/code-cache-inl.h",
"src/objects/code-cache.h",
"src/objects/compilation-cache-inl.h",
"src/objects/compilation-cache.h",
"src/objects/debug-objects-inl.h",
"src/objects/debug-objects.cc",
"src/objects/debug-objects.h",
"src/objects/descriptor-array.h",
"src/objects/dictionary.h",
"src/objects/frame-array-inl.h",
@ -1757,12 +1739,20 @@ v8_source_set("v8_base") {
"src/objects/map-inl.h",
"src/objects/map.h",
"src/objects/module-info.h",
"src/objects/name-inl.h",
"src/objects/name.h",
"src/objects/object-macros-undef.h",
"src/objects/object-macros.h",
"src/objects/regexp-match-info.h",
"src/objects/scope-info.cc",
"src/objects/scope-info.h",
"src/objects/script-inl.h",
"src/objects/script.h",
"src/objects/shared-function-info-inl.h",
"src/objects/shared-function-info.h",
"src/objects/string-inl.h",
"src/objects/string-table.h",
"src/objects/string.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parsing/duplicate-finder.h",
@ -1948,8 +1938,6 @@ v8_source_set("v8_base") {
"src/trap-handler/trap-handler.h",
"src/type-hints.cc",
"src/type-hints.h",
"src/type-info.cc",
"src/type-info.h",
"src/unicode-cache-inl.h",
"src/unicode-cache.h",
"src/unicode-decoder.cc",
@ -1976,6 +1964,8 @@ v8_source_set("v8_base") {
"src/visitors.h",
"src/vm-state-inl.h",
"src/vm-state.h",
"src/wasm/compilation-manager.cc",
"src/wasm/compilation-manager.h",
"src/wasm/decoder.h",
"src/wasm/function-body-decoder-impl.h",
"src/wasm/function-body-decoder.cc",
@ -1983,6 +1973,8 @@ v8_source_set("v8_base") {
"src/wasm/leb-helper.h",
"src/wasm/local-decl-encoder.cc",
"src/wasm/local-decl-encoder.h",
"src/wasm/module-compiler.cc",
"src/wasm/module-compiler.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
"src/wasm/signature-map.cc",
@ -2011,6 +2003,7 @@ v8_source_set("v8_base") {
"src/wasm/wasm-result.h",
"src/wasm/wasm-text.cc",
"src/wasm/wasm-text.h",
"src/wasm/wasm-value.h",
"src/zone/accounting-allocator.cc",
"src/zone/accounting-allocator.h",
"src/zone/zone-allocator.h",
@ -2030,12 +2023,6 @@ v8_source_set("v8_base") {
"src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-scheduler-ia32.cc",
"src/compiler/ia32/instruction-selector-ia32.cc",
"src/crankshaft/ia32/lithium-codegen-ia32.cc",
"src/crankshaft/ia32/lithium-codegen-ia32.h",
"src/crankshaft/ia32/lithium-gap-resolver-ia32.cc",
"src/crankshaft/ia32/lithium-gap-resolver-ia32.h",
"src/crankshaft/ia32/lithium-ia32.cc",
"src/crankshaft/ia32/lithium-ia32.h",
"src/debug/ia32/debug-ia32.cc",
"src/full-codegen/ia32/full-codegen-ia32.cc",
"src/ia32/assembler-ia32-inl.h",
@ -2070,12 +2057,6 @@ v8_source_set("v8_base") {
"src/compiler/x64/instruction-selector-x64.cc",
"src/compiler/x64/unwinding-info-writer-x64.cc",
"src/compiler/x64/unwinding-info-writer-x64.h",
"src/crankshaft/x64/lithium-codegen-x64.cc",
"src/crankshaft/x64/lithium-codegen-x64.h",
"src/crankshaft/x64/lithium-gap-resolver-x64.cc",
"src/crankshaft/x64/lithium-gap-resolver-x64.h",
"src/crankshaft/x64/lithium-x64.cc",
"src/crankshaft/x64/lithium-x64.h",
"src/debug/x64/debug-x64.cc",
"src/full-codegen/x64/full-codegen-x64.cc",
"src/ic/x64/access-compiler-x64.cc",
@ -2136,12 +2117,6 @@ v8_source_set("v8_base") {
"src/compiler/arm/instruction-selector-arm.cc",
"src/compiler/arm/unwinding-info-writer-arm.cc",
"src/compiler/arm/unwinding-info-writer-arm.h",
"src/crankshaft/arm/lithium-arm.cc",
"src/crankshaft/arm/lithium-arm.h",
"src/crankshaft/arm/lithium-codegen-arm.cc",
"src/crankshaft/arm/lithium-codegen-arm.h",
"src/crankshaft/arm/lithium-gap-resolver-arm.cc",
"src/crankshaft/arm/lithium-gap-resolver-arm.h",
"src/debug/arm/debug-arm.cc",
"src/full-codegen/arm/full-codegen-arm.cc",
"src/ic/arm/access-compiler-arm.cc",
@ -2181,6 +2156,7 @@ v8_source_set("v8_base") {
"src/arm64/macro-assembler-arm64.h",
"src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h",
"src/arm64/simulator-logic-arm64.cc",
"src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc",
@ -2189,15 +2165,6 @@ v8_source_set("v8_base") {
"src/compiler/arm64/instruction-selector-arm64.cc",
"src/compiler/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/arm64/unwinding-info-writer-arm64.h",
"src/crankshaft/arm64/delayed-masm-arm64-inl.h",
"src/crankshaft/arm64/delayed-masm-arm64.cc",
"src/crankshaft/arm64/delayed-masm-arm64.h",
"src/crankshaft/arm64/lithium-arm64.cc",
"src/crankshaft/arm64/lithium-arm64.h",
"src/crankshaft/arm64/lithium-codegen-arm64.cc",
"src/crankshaft/arm64/lithium-codegen-arm64.h",
"src/crankshaft/arm64/lithium-gap-resolver-arm64.cc",
"src/crankshaft/arm64/lithium-gap-resolver-arm64.h",
"src/debug/arm64/debug-arm64.cc",
"src/full-codegen/arm64/full-codegen-arm64.cc",
"src/ic/arm64/access-compiler-arm64.cc",
@ -2212,12 +2179,6 @@ v8_source_set("v8_base") {
"src/compiler/mips/instruction-codes-mips.h",
"src/compiler/mips/instruction-scheduler-mips.cc",
"src/compiler/mips/instruction-selector-mips.cc",
"src/crankshaft/mips/lithium-codegen-mips.cc",
"src/crankshaft/mips/lithium-codegen-mips.h",
"src/crankshaft/mips/lithium-gap-resolver-mips.cc",
"src/crankshaft/mips/lithium-gap-resolver-mips.h",
"src/crankshaft/mips/lithium-mips.cc",
"src/crankshaft/mips/lithium-mips.h",
"src/debug/mips/debug-mips.cc",
"src/full-codegen/mips/full-codegen-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
@ -2251,12 +2212,6 @@ v8_source_set("v8_base") {
"src/compiler/mips64/instruction-codes-mips64.h",
"src/compiler/mips64/instruction-scheduler-mips64.cc",
"src/compiler/mips64/instruction-selector-mips64.cc",
"src/crankshaft/mips64/lithium-codegen-mips64.cc",
"src/crankshaft/mips64/lithium-codegen-mips64.h",
"src/crankshaft/mips64/lithium-gap-resolver-mips64.cc",
"src/crankshaft/mips64/lithium-gap-resolver-mips64.h",
"src/crankshaft/mips64/lithium-mips64.cc",
"src/crankshaft/mips64/lithium-mips64.h",
"src/debug/mips64/debug-mips64.cc",
"src/full-codegen/mips64/full-codegen-mips64.cc",
"src/ic/mips64/access-compiler-mips64.cc",
@ -2290,12 +2245,6 @@ v8_source_set("v8_base") {
"src/compiler/ppc/instruction-codes-ppc.h",
"src/compiler/ppc/instruction-scheduler-ppc.cc",
"src/compiler/ppc/instruction-selector-ppc.cc",
"src/crankshaft/ppc/lithium-codegen-ppc.cc",
"src/crankshaft/ppc/lithium-codegen-ppc.h",
"src/crankshaft/ppc/lithium-gap-resolver-ppc.cc",
"src/crankshaft/ppc/lithium-gap-resolver-ppc.h",
"src/crankshaft/ppc/lithium-ppc.cc",
"src/crankshaft/ppc/lithium-ppc.h",
"src/debug/ppc/debug-ppc.cc",
"src/full-codegen/ppc/full-codegen-ppc.cc",
"src/ic/ppc/access-compiler-ppc.cc",
@ -2329,12 +2278,6 @@ v8_source_set("v8_base") {
"src/compiler/s390/instruction-codes-s390.h",
"src/compiler/s390/instruction-scheduler-s390.cc",
"src/compiler/s390/instruction-selector-s390.cc",
"src/crankshaft/s390/lithium-codegen-s390.cc",
"src/crankshaft/s390/lithium-codegen-s390.h",
"src/crankshaft/s390/lithium-gap-resolver-s390.cc",
"src/crankshaft/s390/lithium-gap-resolver-s390.h",
"src/crankshaft/s390/lithium-s390.cc",
"src/crankshaft/s390/lithium-s390.h",
"src/debug/s390/debug-s390.cc",
"src/full-codegen/s390/full-codegen-s390.cc",
"src/ic/s390/access-compiler-s390.cc",
@ -2362,43 +2305,6 @@ v8_source_set("v8_base") {
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
]
} else if (v8_current_cpu == "x87") {
sources += [ ### gcmole(arch:x87) ###
"src/compiler/x87/code-generator-x87.cc",
"src/compiler/x87/instruction-codes-x87.h",
"src/compiler/x87/instruction-scheduler-x87.cc",
"src/compiler/x87/instruction-selector-x87.cc",
"src/crankshaft/x87/lithium-codegen-x87.cc",
"src/crankshaft/x87/lithium-codegen-x87.h",
"src/crankshaft/x87/lithium-gap-resolver-x87.cc",
"src/crankshaft/x87/lithium-gap-resolver-x87.h",
"src/crankshaft/x87/lithium-x87.cc",
"src/crankshaft/x87/lithium-x87.h",
"src/debug/x87/debug-x87.cc",
"src/full-codegen/x87/full-codegen-x87.cc",
"src/ic/x87/access-compiler-x87.cc",
"src/ic/x87/handler-compiler-x87.cc",
"src/ic/x87/ic-x87.cc",
"src/regexp/x87/regexp-macro-assembler-x87.cc",
"src/regexp/x87/regexp-macro-assembler-x87.h",
"src/x87/assembler-x87-inl.h",
"src/x87/assembler-x87.cc",
"src/x87/assembler-x87.h",
"src/x87/code-stubs-x87.cc",
"src/x87/code-stubs-x87.h",
"src/x87/codegen-x87.cc",
"src/x87/codegen-x87.h",
"src/x87/cpu-x87.cc",
"src/x87/deoptimizer-x87.cc",
"src/x87/disasm-x87.cc",
"src/x87/frames-x87.cc",
"src/x87/frames-x87.h",
"src/x87/interface-descriptors-x87.cc",
"src/x87/macro-assembler-x87.cc",
"src/x87/macro-assembler-x87.h",
"src/x87/simulator-x87.cc",
"src/x87/simulator-x87.h",
]
}
configs = [ ":internal_config" ]
@ -2421,6 +2327,8 @@ v8_source_set("v8_base") {
} else {
sources -= [
"src/builtins/builtins-intl.cc",
"src/builtins/builtins-intl.h",
"src/char-predicates.cc",
"src/intl.cc",
"src/intl.h",
"src/objects/intl-objects.cc",
@ -2473,6 +2381,7 @@ v8_component("v8_libbase") {
"src/base/macros.h",
"src/base/once.cc",
"src/base/once.h",
"src/base/optional.h",
"src/base/platform/condition-variable.cc",
"src/base/platform/condition-variable.h",
"src/base/platform/elapsed-timer.h",
@ -2490,6 +2399,7 @@ v8_component("v8_libbase") {
"src/base/safe_math_impl.h",
"src/base/sys-info.cc",
"src/base/sys-info.h",
"src/base/template-utils.h",
"src/base/timezone-cache.h",
"src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h",
@ -2557,6 +2467,11 @@ v8_component("v8_libbase") {
"src/base/platform/platform-linux.cc",
]
}
} else if (is_fuchsia) {
sources += [
"src/base/debug/stack_trace_fuchsia.cc",
"src/base/platform/platform-fuchsia.cc",
]
} else if (is_mac) {
sources += [
"src/base/debug/stack_trace_posix.cc",
@ -2737,7 +2652,7 @@ group("v8_fuzzers") {
":v8_simple_json_fuzzer",
":v8_simple_parser_fuzzer",
":v8_simple_regexp_fuzzer",
":v8_simple_wasm_asmjs_fuzzer",
":v8_simple_wasm_async_fuzzer",
":v8_simple_wasm_call_fuzzer",
":v8_simple_wasm_code_fuzzer",
":v8_simple_wasm_compile_fuzzer",
@ -2758,10 +2673,6 @@ if (is_component_build) {
"src/v8dll-main.cc",
]
deps = [
":v8_dump_build_config",
]
public_deps = [
":v8_base",
":v8_maybe_snapshot",
@ -2779,10 +2690,6 @@ if (is_component_build) {
"src/v8dll-main.cc",
]
deps = [
":v8_dump_build_config",
]
public_deps = [
":v8_base",
":v8_maybe_snapshot",
@ -2798,10 +2705,6 @@ if (is_component_build) {
}
} else {
group("v8") {
deps = [
":v8_dump_build_config",
]
public_deps = [
":v8_base",
":v8_maybe_snapshot",
@ -2813,10 +2716,6 @@ if (is_component_build) {
group("v8_for_testing") {
testonly = true
deps = [
":v8_dump_build_config",
]
public_deps = [
":v8_base",
":v8_maybe_snapshot",
@ -3069,9 +2968,9 @@ v8_source_set("wasm_fuzzer") {
v8_fuzzer("wasm_fuzzer") {
}
v8_source_set("wasm_asmjs_fuzzer") {
v8_source_set("wasm_async_fuzzer") {
sources = [
"test/fuzzer/wasm-asmjs.cc",
"test/fuzzer/wasm-async.cc",
]
deps = [
@ -3086,7 +2985,7 @@ v8_source_set("wasm_asmjs_fuzzer") {
]
}
v8_fuzzer("wasm_asmjs_fuzzer") {
v8_fuzzer("wasm_async_fuzzer") {
}
v8_source_set("wasm_code_fuzzer") {

2830
deps/v8/ChangeLog vendored

File diff suppressed because it is too large.

27
deps/v8/DEPS vendored

@ -8,23 +8,23 @@ vars = {
deps = {
"v8/build":
Var("chromium_url") + "/chromium/src/build.git" + "@" + "1caf3a69f3b0379c9fef2493aa1b3cda96e17d7b",
Var("chromium_url") + "/chromium/src/build.git" + "@" + "1808a907ce42f13b224c263e9843d718fc6d9c39",
"v8/tools/gyp":
Var("chromium_url") + "/external/gyp.git" + "@" + "eb296f67da078ec01f5e3a9ea9cdc6d26d680161",
"v8/third_party/icu":
Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "c844075aa0f1758d04f9192825f1b1e7e607992e",
Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "dfa798fe694702b43a3debc3290761f22b1acaf8",
"v8/third_party/instrumented_libraries":
Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "644afd349826cb68204226a16c38bde13abe9c3c",
"v8/buildtools":
Var("chromium_url") + "/chromium/buildtools.git" + "@" + "98f00fa10dbad2cdbb2e297a66c3d6d5bc3994f3",
Var("chromium_url") + "/chromium/buildtools.git" + "@" + "5ad14542a6a74dd914f067b948c5d3e8d170396b",
"v8/base/trace_event/common":
Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "39a3450531fc73432e963db8668695d2e8f13053",
Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "65d1d42a5df6c0a563a6fdfa58a135679185e5d9",
"v8/third_party/jinja2":
Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "d34383206fa42d52faa10bb9931d6d538f3a57e0",
"v8/third_party/markupsafe":
Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "8f45f5cfa0009d2a70589bcda0349b8cb2b72783",
"v8/tools/swarming_client":
Var('chromium_url') + '/external/swarming.client.git' + '@' + "a941a089ff1000403078b74cb628eb430f07d271",
Var('chromium_url') + '/external/swarming.client.git' + '@' + "a56c2b39ca23bdf41458421a7f825ddbf3f43f28",
"v8/testing/gtest":
Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@ -34,21 +34,21 @@ deps = {
"v8/test/mozilla/data":
Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/test262/data":
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "230f9fc5688ce76bfaa99aba5f680a159eaac9e2",
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "1b911a8f8abf4cb63882cfbe72dcd4c82bb8ad91",
"v8/test/test262/harness":
Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
"v8/tools/clang":
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "05f306039aa5029fa88768690e5c512097419f9d",
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "844603c1fcd47f578931b3ccd583e19f816a3842",
"v8/test/wasm-js":
Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "07fd6430f879d36928d179a62d9bdeed82286065",
Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "aadd3a340c78e53078a7bb6c17cc30f105c2960c",
}
deps_os = {
"android": {
"v8/third_party/android_tools":
Var("chromium_url") + "/android_tools.git" + "@" + "cb6bc21107001e2f2eeee2707b482b2b755baf51",
Var("chromium_url") + "/android_tools.git" + "@" + "e9d4018e149d50172ed462a7c21137aa915940ec",
"v8/third_party/catapult":
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "08a6e0ac161db7309d8f9cad0ccd38e0b1fd41e0",
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "44b022b2a09508ec025ae76a26308e89deb2cf69",
},
}
@ -262,13 +262,6 @@ hooks = [
'v8/third_party/binutils/download.py',
],
},
{
# Pull gold plugin if needed or requested via GYP_DEFINES.
# Note: This must run before the clang update.
'name': 'gold_plugin',
'pattern': '.',
'action': ['python', 'v8/gypfiles/download_gold_plugin.py'],
},
{
# Pull clang if needed or requested via GYP_DEFINES.
# Note: On Win, this should run after win_toolchain, as it may use it.

11
deps/v8/Makefile vendored

@ -255,14 +255,13 @@ endif
# Architectures and modes to be compiled. Consider these to be internal
# variables, don't override them (use the targets instead).
ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el x87 ppc ppc64 s390 \
s390x
ARCHES32 = ia32 arm mips mipsel x87 ppc s390
ARCHES = ia32 x64 arm arm64 mips mipsel mips64 mips64el ppc ppc64 s390 s390x
ARCHES32 = ia32 arm mips mipsel ppc s390
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug optdebug
DEFAULT_MODES = release debug
ANDROID_ARCHES = android_ia32 android_x64 android_arm android_arm64 \
android_mipsel android_x87
android_mipsel
# List of files that trigger Makefile regeneration:
GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
@ -272,9 +271,7 @@ GYPFILES = third_party/icu/icu.gypi third_party/icu/icu.gyp \
test/cctest/cctest.gyp test/fuzzer/fuzzer.gyp \
test/unittests/unittests.gyp src/v8.gyp \
tools/parser-shell.gyp testing/gmock.gyp testing/gtest.gyp \
buildtools/third_party/libc++abi/libc++abi.gyp \
buildtools/third_party/libc++/libc++.gyp samples/samples.gyp \
src/third_party/vtune/v8vtune.gyp src/d8.gyp
samples/samples.gyp src/third_party/vtune/v8vtune.gyp src/d8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)

3
deps/v8/OWNERS vendored

@ -35,3 +35,6 @@ ulan@chromium.org
verwaest@chromium.org
vogelheim@chromium.org
yangguo@chromium.org
# TEAM: v8-dev@googlegroups.com
# COMPONENT: Blink>JavaScript

37
deps/v8/PRESUBMIT.py vendored

@ -31,6 +31,7 @@ See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import json
import re
import sys
@ -277,6 +278,7 @@ def _CommonChecks(input_api, output_api):
results.extend(
_CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api))
results.extend(_CheckMissingFiles(input_api, output_api))
results.extend(_CheckJSONFiles(input_api, output_api))
return results
@ -316,6 +318,25 @@ def _CheckCommitMessageBugEntry(input_api, output_api):
return [output_api.PresubmitError(r) for r in results]
def _CheckJSONFiles(input_api, output_api):
def FilterFile(affected_file):
return input_api.FilterSourceFile(
affected_file,
white_list=(r'.+\.json',))
results = []
for f in input_api.AffectedFiles(
file_filter=FilterFile, include_deletes=False):
with open(f.LocalPath()) as j:
try:
json.load(j)
except Exception as e:
results.append(
'JSON validation failed for %s. Error:\n%s' % (f.LocalPath(), e))
return [output_api.PresubmitError(r) for r in results]
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
@ -332,3 +353,19 @@ def CheckChangeOnCommit(input_api, output_api):
input_api, output_api,
json_url='http://v8-status.appspot.com/current?format=json'))
return results
def PostUploadHook(cl, change, output_api):
"""git cl upload will call this hook after the issue is created/modified.
This hook adds a noi18n bot if the patch affects Intl.
"""
def affects_intl(f):
return 'intl' in f.LocalPath() or 'test262' in f.LocalPath()
if not change.AffectedFiles(file_filter=affects_intl):
return []
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
'master.tryserver.v8:v8_linux_noi18n_rel_ng'
],
'Automatically added noi18n trybots to run tests on CQ.')

deps/v8/base/trace_event/common/trace_event_common.h vendored

@ -359,6 +359,12 @@
TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val)
#define TRACE_EVENT_MARK_WITH_TIMESTAMP2( \
category_group, name, timestamp, arg1_name, arg1_val, arg2_name, arg2_val) \
INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP( \
TRACE_EVENT_PHASE_MARK, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, arg2_name, arg2_val)
#define TRACE_EVENT_COPY_MARK(category_group, name) \
INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_MARK, category_group, name, \
TRACE_EVENT_FLAG_COPY)


@ -2,9 +2,6 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
mac_sdk_min_build_override = "10.10"
mac_deployment_target_build_override = "10.7"
# Variable that can be used to support multiple build scenarios, like having
# Chromium specific targets in a client project's GN file etc.
build_with_chromium = false


@ -4,3 +4,4 @@ CODE_REVIEW_SERVER: https://codereview.chromium.org
CC_LIST: v8-reviews@googlegroups.com
VIEW_VC: https://chromium.googlesource.com/v8/v8/+/
STATUS: http://v8-status.appspot.com/status
RUN_POST_UPLOAD_HOOK: True


@ -56,16 +56,16 @@ template("v8_isolate_run") {
}
# Translate gn to gyp variables.
if (v8_code_coverage) {
coverage = "1"
} else {
coverage = "0"
}
if (is_asan) {
asan = "1"
} else {
asan = "0"
}
if (is_lsan) {
lsan = "1"
} else {
lsan = "0"
}
if (is_msan) {
msan = "1"
} else {
@ -158,15 +158,13 @@ template("v8_isolate_run") {
"--config-variable",
"is_gn=1",
"--config-variable",
"lsan=$lsan",
"--config-variable",
"msan=$msan",
"--config-variable",
"tsan=$tsan",
"--config-variable",
"coverage=0",
"coverage=$coverage",
"--config-variable",
"sanitizer_coverage=0",
"sanitizer_coverage=$sanitizer_coverage_flags",
"--config-variable",
"component=$component",
"--config-variable",

50
deps/v8/gni/v8.gni vendored

@ -4,8 +4,13 @@
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/v8_target_cpu.gni")
import("//build/split_static_library.gni")
declare_args() {
# Set flags for tracking code coverage. Uses gcov with gcc and sanitizer
# coverage with clang.
v8_code_coverage = false
# Includes files needed for correctness fuzzing.
v8_correctness_fuzzer = false
@ -84,6 +89,13 @@ if (is_debug && !v8_optimized_debug) {
}
}
if (v8_code_coverage && !is_clang) {
v8_add_configs += [
v8_path_prefix + ":v8_gcov_coverage_cflags",
v8_path_prefix + ":v8_gcov_coverage_ldflags",
]
}
if (is_posix && v8_enable_backtrace) {
v8_remove_configs += [ "//build/config/gcc:symbol_visibility_hidden" ]
v8_add_configs += [ "//build/config/gcc:symbol_visibility_default" ]
@ -91,20 +103,19 @@ if (is_posix && v8_enable_backtrace) {
# All templates should be kept in sync.
template("v8_source_set") {
if (defined(v8_static_library) && v8_static_library) {
static_library(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
}
if (defined(invoker.split_count) && invoker.split_count > 1 &&
defined(v8_static_library) && v8_static_library && is_win) {
link_target_type = "split_static_library"
} else if (defined(v8_static_library) && v8_static_library) {
link_target_type = "static_library"
} else {
source_set(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
}
link_target_type = "source_set"
}
target(link_target_type, target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
}
}
@ -135,6 +146,19 @@ template("v8_executable") {
# For enabling ASLR.
ldflags = [ "-pie" ]
}
if (defined(testonly) && testonly && v8_code_coverage) {
# Only add code coverage cflags for non-test files for performance
# reasons.
if (is_clang) {
configs -= [ "//build/config/sanitizers:default_sanitizer_flags" ]
configs += [ "//build/config/sanitizers:default_sanitizer_flags_but_coverage" ]
} else {
configs -= [ v8_path_prefix + ":v8_gcov_coverage_cflags" ]
}
}
deps += [
v8_path_prefix + ":v8_dump_build_config",
]
}
}

deps/v8/gypfiles/download_gold_plugin.py vendored

@ -1,81 +0,0 @@
#!/usr/bin/env python
# Copyright 2015 the V8 project authors. All rights reserved.
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to download LLVM gold plugin from google storage."""
import json
import os
import re
import platform
import shutil
import subprocess
import sys
import zipfile
# Bail out on windows and cygwin.
if "win" in platform.system().lower():
# Python 2.7.6 hangs at the second path.insert command on windows. Works
# with python 2.7.8.
print "Gold plugin download not supported on windows."
sys.exit(0)
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.insert(0, os.path.join(CHROME_SRC, 'tools'))
import find_depot_tools
DEPOT_PATH = find_depot_tools.add_depot_tools_to_path()
GSUTIL_PATH = os.path.join(DEPOT_PATH, 'gsutil.py')
LLVM_BUILD_PATH = os.path.join(CHROME_SRC, 'third_party', 'llvm-build',
'Release+Asserts')
CLANG_UPDATE_PY = os.path.join(CHROME_SRC, 'tools', 'clang', 'scripts',
'update.py')
CLANG_REVISION = os.popen(CLANG_UPDATE_PY + ' --print-revision').read().rstrip()
CLANG_BUCKET = 'gs://chromium-browser-clang/Linux_x64'
GOLD_PLUGIN_PATH = os.path.join(LLVM_BUILD_PATH, 'lib', 'LLVMgold.so')
sys.path.insert(0, os.path.join(CHROME_SRC, 'tools', 'clang', 'scripts'))
import update
def main():
if not re.search(r'cfi_vptr=1', os.environ.get('GYP_DEFINES', '')):
# Bailout if this is not a cfi build.
print 'Skipping gold plugin download for non-cfi build.'
return 0
if (os.path.exists(GOLD_PLUGIN_PATH) and
update.ReadStampFile().strip() == update.PACKAGE_VERSION):
# Bailout if clang is up-to-date. This requires the script to be run before
# the clang update step! I.e. afterwards clang would always be up-to-date.
print 'Skipping gold plugin download. File present and clang up to date.'
return 0
# Make sure this works on empty checkouts (i.e. clang not downloaded yet).
if not os.path.exists(LLVM_BUILD_PATH):
os.makedirs(LLVM_BUILD_PATH)
targz_name = 'llvmgold-%s.tgz' % CLANG_REVISION
remote_path = '%s/%s' % (CLANG_BUCKET, targz_name)
os.chdir(LLVM_BUILD_PATH)
# TODO(pcc): Fix gsutil.py cp url file < /dev/null 2>&0
# (currently aborts with exit code 1,
# https://github.com/GoogleCloudPlatform/gsutil/issues/289) or change the
# stdin->stderr redirect in update.py to do something else (crbug.com/494442).
subprocess.check_call(['python', GSUTIL_PATH,
'cp', remote_path, targz_name],
stderr=open('/dev/null', 'w'))
subprocess.check_call(['tar', 'xzf', targz_name])
os.remove(targz_name)
return 0
if __name__ == '__main__':
sys.exit(main())


@ -73,6 +73,9 @@
# Enable/disable JavaScript API accessors.
'v8_js_accessors%': 0,
# Temporary flag to allow embedders to update their microtasks scopes.
'v8_check_microtasks_scopes_consistency%': 'false',
},
'target_defaults': {
'conditions': [
@ -118,12 +121,15 @@
['dcheck_always_on!=0', {
'defines': ['DEBUG',],
}],
['v8_check_microtasks_scopes_consistency=="true"', {
'defines': ['V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY',],
}],
], # conditions
'configurations': {
'DebugBaseCommon': {
'abstract': 1,
'variables': {
'v8_enable_handle_zapping%': 0,
'v8_enable_handle_zapping%': 1,
},
'conditions': [
['v8_enable_handle_zapping==1', {
@ -133,7 +139,7 @@
}, # Debug
'Release': {
'variables': {
'v8_enable_handle_zapping%': 1,
'v8_enable_handle_zapping%': 0,
},
'conditions': [
['v8_enable_handle_zapping==1', {


@ -75,7 +75,6 @@
'--config-variable', 'has_valgrind=<(has_valgrind)',
'--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)',
'--config-variable', 'is_gn=0',
'--config-variable', 'lsan=<(lsan)',
'--config-variable', 'msan=<(msan)',
'--config-variable', 'tsan=<(tsan)',
'--config-variable', 'coverage=<(coverage)',


@ -43,6 +43,7 @@
'v8_enable_i18n_support%': 1,
'v8_deprecation_warnings': 1,
'v8_imminent_deprecation_warnings': 1,
'v8_check_microtasks_scopes_consistency': 'true',
'msvs_multi_core_compile%': '1',
'mac_deployment_target%': '10.7',
'release_extra_cflags%': '',
@ -135,8 +136,6 @@
'clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',
'make_clang_dir%': '<(base_dir)/third_party/llvm-build/Release+Asserts',
'use_lto%': 0,
# Control Flow Integrity for virtual calls and casts.
# See http://clang.llvm.org/docs/ControlFlowIntegrity.html
'cfi_vptr%': 0,
@ -201,7 +200,6 @@
'use_prebuilt_instrumented_libraries%': '<(use_prebuilt_instrumented_libraries)',
'use_custom_libcxx%': '<(use_custom_libcxx)',
'linux_use_bundled_gold%': '<(linux_use_bundled_gold)',
'use_lto%': '<(use_lto)',
'cfi_vptr%': '<(cfi_vptr)',
'cfi_diag%': '<(cfi_diag)',
'cfi_blacklist%': '<(cfi_blacklist)',
@ -264,14 +262,14 @@
# goma doesn't support PDB yet.
'fastbuild%': 1,
}],
['((v8_target_arch=="ia32" or v8_target_arch=="x64" or v8_target_arch=="x87") and \
['((v8_target_arch=="ia32" or v8_target_arch=="x64") and \
(OS=="linux" or OS=="mac")) or (v8_target_arch=="ppc64" and OS=="linux")', {
'v8_enable_gdbjit%': 1,
}, {
'v8_enable_gdbjit%': 0,
}],
['(OS=="linux" or OS=="mac") and (target_arch=="ia32" or target_arch=="x64") and \
(v8_target_arch!="x87" and v8_target_arch!="x32")', {
v8_target_arch!="x32"', {
'clang%': 1,
}, {
'clang%': 0,
@ -292,9 +290,6 @@
# the C++ standard library is used.
'use_custom_libcxx%': 1,
}],
['cfi_vptr==1', {
'use_lto%': 1,
}],
['OS=="android"', {
# Location of Android NDK.
'variables': {
@ -678,15 +673,11 @@
}],
],
}],
['linux_use_bundled_gold==1 and not (clang==0 and use_lto==1)', {
['linux_use_bundled_gold==1', {
# Put our binutils, which contains gold in the search path. We pass
# the path to gold to the compiler. gyp leaves unspecified what the
# cwd is when running the compiler, so the normal gyp path-munging
# fails us. This hack gets the right path.
#
# Disabled when using GCC LTO because GCC also uses the -B search
# path at link time to find "as", and our bundled "as" can only
# target x86.
'ldflags': [
# Note, Chromium allows ia32 host arch as well, we limit this to
# x64 in v8.
@ -696,12 +687,15 @@
['sysroot!="" and clang==1', {
'target_conditions': [
['_toolset=="target"', {
'variables': {
'ld_paths': ['<!(<(DEPTH)/build/linux/sysroot_ld_path.sh <(sysroot))'],
},
'cflags': [
'--sysroot=<(sysroot)',
],
'ldflags': [
'--sysroot=<(sysroot)',
'<!(<(DEPTH)/build/linux/sysroot_ld_path.sh <(sysroot))',
'<!(<(base_dir)/gypfiles/sysroot_ld_flags.sh <@(ld_paths))',
],
}]]
}],
@ -785,6 +779,9 @@
# that signed overflow does not occur. Generates false positives
# (see http://crbug.com/v8/6341).
"-Wno-strict-overflow",
# Don't rely on strict aliasing; v8 does weird pointer casts all
# over the place.
'-fno-strict-aliasing',
],
}],
[ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
@ -1210,7 +1207,7 @@
'-L<(android_libcpp_libs)/arm64-v8a',
],
}],
['target_arch=="ia32" or target_arch=="x87"', {
['target_arch=="ia32"', {
# The x86 toolchain currently has problems with stack-protector.
'cflags!': [
'-fstack-protector',
@ -1406,107 +1403,5 @@
}],
],
}],
['use_lto==1', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-flto',
],
}],
],
},
}],
['use_lto==1 and clang==0', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-ffat-lto-objects',
],
}],
],
},
}],
['use_lto==1 and clang==1', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'arflags': [
'--plugin', '<(clang_dir)/lib/LLVMgold.so',
],
# Apply a lower optimization level with lto. Chromium does this
# for non-official builds only - a differentiation that doesn't
# exist in v8.
'ldflags': [
'-Wl,--plugin-opt,O1',
],
}],
],
},
}],
['use_lto==1 and clang==0', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'ldflags': [
'-flto=32',
],
}],
],
},
}],
['use_lto==1 and clang==1', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'ldflags': [
'-flto',
],
}],
],
},
}],
['cfi_diag==1', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fno-sanitize-trap=cfi',
'-fno-sanitize-recover=cfi',
],
'cflags_cc!': [
'-fno-rtti',
],
'cflags!': [
'-fno-rtti',
],
'ldflags': [
'-fno-sanitize-trap=cfi',
'-fno-sanitize-recover=cfi',
],
}],
],
},
}],
['cfi_vptr==1', {
'target_defaults': {
'target_conditions': [
['_toolset=="target"', {
'cflags': [
'-fsanitize=cfi-vcall',
'-fsanitize=cfi-derived-cast',
'-fsanitize=cfi-unrelated-cast',
'-fsanitize-blacklist=<(cfi_blacklist)',
],
'ldflags': [
'-fsanitize=cfi-vcall',
'-fsanitize=cfi-derived-cast',
'-fsanitize=cfi-unrelated-cast',
],
}],
],
},
}],
],
}

12
deps/v8/gypfiles/sysroot_ld_flags.sh vendored Executable file

@ -0,0 +1,12 @@
#!/bin/sh
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This is for backwards-compatibility after:
# https://codereview.chromium.org/2900193003
for entry in $@; do
echo -L$entry
echo -Wl,-rpath-link=$entry
done | xargs echo
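For example, `sysroot_ld_flags.sh /usr/lib /lib` prints
`-L/usr/lib -Wl,-rpath-link=/usr/lib -L/lib -Wl,-rpath-link=/lib` on a single
line.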


@ -128,9 +128,6 @@
}],
],
# Link-Time Optimizations
'use_lto%': 0,
# Indicates if gcmole tools are downloaded by a hook.
'gcmole%': 0,
},
@ -147,7 +144,7 @@
'host_cxx_is_biarch%': 0,
},
}],
['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
['target_arch=="ia32" or target_arch=="x64" or \
target_arch=="ppc" or target_arch=="ppc64" or target_arch=="s390" or \
target_arch=="s390x" or clang==1', {
'variables': {
@ -280,17 +277,6 @@
}],
],
}],
# Disable GCC LTO for v8
# v8 is optimized for speed. Because GCC LTO merges flags at link
# time, we disable LTO to prevent any -O2 flags from taking
# precedence over v8's -Os flag. However, LLVM LTO does not work
# this way so we keep LTO enabled under LLVM.
['clang==0 and use_lto==1', {
'cflags!': [
'-flto',
'-ffat-lto-objects',
],
}],
],
}], # _toolset=="target"
],
@ -356,12 +342,6 @@
'V8_TARGET_ARCH_IA32',
],
}], # v8_target_arch=="ia32"
['v8_target_arch=="x87"', {
'defines': [
'V8_TARGET_ARCH_X87',
],
'cflags': ['-march=i586'],
}], # v8_target_arch=="x87"
['v8_target_arch=="mips" or v8_target_arch=="mipsel" \
or v8_target_arch=="mips64" or v8_target_arch=="mips64el"', {
'target_conditions': [
@ -1020,9 +1000,8 @@
['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
(v8_target_arch=="arm" or v8_target_arch=="ia32" or \
v8_target_arch=="x87" or v8_target_arch=="mips" or \
v8_target_arch=="mipsel" or v8_target_arch=="ppc" or \
v8_target_arch=="s390")', {
v8_target_arch=="mips" or v8_target_arch=="mipsel" or \
v8_target_arch=="ppc" or v8_target_arch=="s390")', {
'target_conditions': [
['_toolset=="host"', {
'conditions': [
@ -1284,9 +1263,7 @@
}],
],
}],
# TODO(pcc): Re-enable in LTO builds once we've fixed the intermittent
# link failures (crbug.com/513074).
['linux_use_gold_flags==1 and use_lto==0', {
['linux_use_gold_flags==1', {
'target_conditions': [
['_toolset=="target"', {
'ldflags': [

69
deps/v8/include/APIDesign.md vendored Normal file

@ -0,0 +1,69 @@
# The V8 public C++ API
# Overview
The V8 public C++ API aims to support four use cases:
1. Enable applications that embed V8 (called the embedder) to configure and run
one or more instances of V8.
2. Expose ECMAScript-like capabilities to the embedder.
3. Enable the embedder to interact with ECMAScript by exposing API objects.
4. Provide access to the V8 debugger (inspector).
# Configuring and running an instance of V8
V8 requires access to certain OS-level primitives such as the ability to
schedule work on threads, or allocate memory.
The embedder can define how to access those primitives via the v8::Platform
interface. While V8 bundles a basic implementation, embedders are highly
encouraged to implement v8::Platform themselves.
Currently, the v8::ArrayBuffer::Allocator is passed to the v8::Isolate factory
method; conceptually, however, it should also be part of the v8::Platform,
since all instances of V8 should share one allocator.
Once the v8::Platform is configured, a v8::Isolate can be created. All
further interactions with V8 should explicitly reference the v8::Isolate they
refer to. All API methods should eventually take a v8::Isolate parameter.
When a given instance of V8 is no longer needed, it can be destroyed by
disposing the respective v8::Isolate. If the embedder wishes to free all memory
associated with the v8::Isolate, it has to first clear all global handles
associated with that v8::Isolate.
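For illustration, a minimal embedder set-up and tear-down against this API
might look as follows (a sketch only; error handling omitted):

```cpp
#include "libplatform/libplatform.h"
#include "v8.h"

int main() {
  // Process-wide setup: the platform first, then V8 itself.
  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
  v8::V8::InitializePlatform(platform);
  v8::V8::Initialize();

  // As noted above, each isolate currently receives its ArrayBuffer
  // allocator at creation time.
  v8::Isolate::CreateParams params;
  params.array_buffer_allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  v8::Isolate* isolate = v8::Isolate::New(params);

  // ... run scripts against the isolate ...

  // Tear everything down in reverse order.
  isolate->Dispose();
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  delete params.array_buffer_allocator;
  delete platform;
  return 0;
}
```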
# ECMAScript-like capabilities
In general, the C++ API shouldn't enable capabilities that aren't available to
scripts running in V8. Experience has shown that it's not possible to maintain
such API methods in the long term. However, capabilities that are also
available to scripts, i.e., ones defined in the ECMAScript standard, are there
to stay, and we can safely expose them to embedders.
The C++ API should also be pleasant to use, and not require learning new
paradigms. Similarly to how the API exposed to scripts aims to provide good
ergonomics, we should aim to provide a reasonable developer experience for this
API surface.
ECMAScript makes heavy use of exceptions; V8's C++ code, however, doesn't use
C++ exceptions. Therefore, all API methods that can throw exceptions should
indicate so by returning a v8::Maybe<> or v8::MaybeLocal<> result,
and by taking a v8::Local<v8::Context> parameter that indicates in which
context a possible exception should be thrown.
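A minimal sketch of that convention (the helper name is illustrative):

```cpp
// Every fallible call returns a Maybe type and takes the Local<Context>
// in which a pending exception would be thrown.
v8::MaybeLocal<v8::Value> RunScript(v8::Local<v8::Context> context,
                                    v8::Local<v8::String> source) {
  v8::Local<v8::Script> script;
  if (!v8::Script::Compile(context, source).ToLocal(&script)) {
    return v8::MaybeLocal<v8::Value>();  // Compilation threw; result empty.
  }
  return script->Run(context);  // Also empty if the script itself threw.
}
```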
# API objects
V8 allows embedders to define special objects that expose additional
capabilities and APIs to scripts. The most prominent example is exposing the
HTML DOM in Blink. Node.js is another example. It is less clear what kind
of capabilities we want to expose via this API surface. As a rule of thumb, we
want to expose operations as defined in the WebIDL and HTML spec: we
assume that those requirements are somewhat stable, and that they are a
superset of the requirements of other embedders including node.js.
Ideally, the API surfaces defined in those specs hook into the ECMAScript spec
which in turn guarantees long-term stability of the API.
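For illustration, an API object can be wired up through templates; the
function and property names below are invented:

```cpp
#include "v8.h"

// A hypothetical embedder callback exposed to scripts as a global function.
void GetEmbedderVersion(const v8::FunctionCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(
      v8::String::NewFromUtf8(info.GetIsolate(), "1.0",
                              v8::NewStringType::kNormal).ToLocalChecked());
}

v8::Local<v8::Context> MakeContext(v8::Isolate* isolate) {
  // Install the callback on the global object template of a new context.
  v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
  global->Set(
      v8::String::NewFromUtf8(isolate, "embedderVersion",
                              v8::NewStringType::kNormal).ToLocalChecked(),
      v8::FunctionTemplate::New(isolate, GetEmbedderVersion));
  return v8::Context::New(isolate, nullptr, global);
}
```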
# The V8 inspector
All debugging capabilities of V8 should be exposed via the inspector protocol.

deps/v8/include/OWNERS vendored

@ -1,7 +1,9 @@
adamk@chromium.org
danno@chromium.org
jochen@chromium.org
per-file v8-inspector.h=dgozman@chromium.org
per-file v8-inspector.h=pfeldman@chromium.org
per-file v8-inspector-protocol.h=dgozman@chromium.org
per-file v8-inspector-protocol.h=pfeldman@chromium.org
# COMPONENT: Blink>JavaScript>API

29
deps/v8/include/PRESUBMIT.py vendored Normal file

@ -0,0 +1,29 @@
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for //v8/include
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import os
def PostUploadHook(cl, change, output_api):
"""git cl upload will call this hook after the issue is created/modified.
This hook adds extra try bots to the CL description in order to run layout
tests in addition to CQ try bots.
"""
def header_filter(f):
return '.h' in os.path.split(f.LocalPath())[1]
if not change.AffectedFiles(file_filter=header_filter):
return []
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
'master.tryserver.chromium.linux:linux_chromium_rel_ng'
],
'Automatically added layout test trybots to run tests on CQ.')

deps/v8/include/v8-version.h vendored

@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 6
#define V8_MINOR_VERSION 0
#define V8_BUILD_NUMBER 287
#define V8_PATCH_LEVEL 53
#define V8_MINOR_VERSION 1
#define V8_BUILD_NUMBER 534
#define V8_PATCH_LEVEL 36
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

348
deps/v8/include/v8.h vendored

@ -869,8 +869,6 @@ class V8_EXPORT HandleScope {
HandleScope(const HandleScope&) = delete;
void operator=(const HandleScope&) = delete;
void* operator new(size_t size);
void operator delete(void*, size_t);
protected:
V8_INLINE HandleScope() {}
@ -881,6 +879,13 @@ class V8_EXPORT HandleScope {
internal::Object* value);
private:
// Declaring operator new and delete as deleted is not spec compliant.
// Therefore declare them private instead to disable dynamic alloc
void* operator new(size_t size);
void* operator new[](size_t size);
void operator delete(void*, size_t);
void operator delete[](void*, size_t);
// Uses heap_object to obtain the current Isolate.
static internal::Object** CreateHandle(internal::HeapObject* heap_object,
internal::Object* value);
@ -921,10 +926,15 @@ class V8_EXPORT EscapableHandleScope : public HandleScope {
EscapableHandleScope(const EscapableHandleScope&) = delete;
void operator=(const EscapableHandleScope&) = delete;
void* operator new(size_t size);
void operator delete(void*, size_t);
private:
// Declaring operator new and delete as deleted is not spec compliant.
// Therefore declare them private instead to disable dynamic alloc
void* operator new(size_t size);
void* operator new[](size_t size);
void operator delete(void*, size_t);
void operator delete[](void*, size_t);
internal::Object** Escape(internal::Object** escape_value);
internal::Object** escape_slot_;
};
@ -941,10 +951,15 @@ class V8_EXPORT SealHandleScope {
SealHandleScope(const SealHandleScope&) = delete;
void operator=(const SealHandleScope&) = delete;
void* operator new(size_t size);
void operator delete(void*, size_t);
private:
// Declaring operator new and delete as deleted is not spec compliant.
// Therefore declare them private instead to disable dynamic allocation.
void* operator new(size_t size);
void* operator new[](size_t size);
void operator delete(void*, size_t);
void operator delete[](void*, size_t);
internal::Isolate* const isolate_;
internal::Object** prev_limit_;
int prev_sealed_level_;
@ -1016,9 +1031,6 @@ class ScriptOrigin {
V8_INLINE Local<Value> ResourceName() const;
V8_INLINE Local<Integer> ResourceLineOffset() const;
V8_INLINE Local<Integer> ResourceColumnOffset() const;
/**
* Returns true for embedder's debugger scripts
*/
V8_INLINE Local<Integer> ScriptID() const;
V8_INLINE Local<Value> SourceMapUrl() const;
V8_INLINE ScriptOriginOptions Options() const { return options_; }
@ -1032,7 +1044,6 @@ class ScriptOrigin {
Local<Value> source_map_url_;
};
/**
* A compiled JavaScript script, not yet tied to a Context.
*/
@ -1064,6 +1075,22 @@ class V8_EXPORT UnboundScript {
static const int kNoScriptId = 0;
};
/**
* A location in JavaScript source.
*/
class V8_EXPORT Location {
public:
int GetLineNumber() { return line_number_; }
int GetColumnNumber() { return column_number_; }
Location(int line_number, int column_number)
: line_number_(line_number), column_number_(column_number) {}
private:
int line_number_;
int column_number_;
};
/**
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes. DO NOT USE.
@ -1072,6 +1099,28 @@ class V8_EXPORT UnboundScript {
*/
class V8_EXPORT Module {
public:
/**
* The different states a module can be in.
*/
enum Status {
kUninstantiated,
kInstantiating,
kInstantiated,
kEvaluating,
kEvaluated,
kErrored
};
/**
* Returns the module's current status.
*/
Status GetStatus() const;
/**
* For a module in kErrored status, this returns the corresponding exception.
*/
Local<Value> GetException() const;
/**
* Returns the number of modules requested by this module.
*/
@ -1083,6 +1132,12 @@ class V8_EXPORT Module {
*/
Local<String> GetModuleRequest(int i) const;
/**
* Returns the source location (line number and column number) of the ith
* module specifier's first occurrence in this module.
*/
Location GetModuleRequestLocation(int i) const;
/**
* Returns the identity hash for this object.
*/
@ -1095,40 +1150,29 @@ class V8_EXPORT Module {
/**
* ModuleDeclarationInstantiation
*
* Returns false if an exception occurred during instantiation. (In the case
* where the callback throws an exception, that exception is propagated.)
* Returns an empty Maybe<bool> if an exception occurred during
* instantiation. (In the case where the callback throws an exception, that
* exception is propagated.)
*/
V8_WARN_UNUSED_RESULT bool Instantiate(Local<Context> context,
ResolveCallback callback);
V8_DEPRECATED("Use Maybe<bool> version",
bool Instantiate(Local<Context> context,
ResolveCallback callback));
V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(Local<Context> context,
ResolveCallback callback);
/**
* ModuleEvaluation
*
* Returns the completion value.
* TODO(neis): Be more precise or say nothing.
*/
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Evaluate(Local<Context> context);
};
/**
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes. DO NOT USE.
*
* A compiled JavaScript module.
*/
class V8_EXPORT DynamicImportResult {
public:
/**
* Resolves the promise with the namespace object of the given
* module.
*/
V8_WARN_UNUSED_RESULT bool FinishDynamicImportSuccess(Local<Context> context,
Local<Module> module);
/**
* Rejects the promise with the given exception.
* Returns the namespace object of this module. The module must have
* been successfully instantiated before and must not be errored.
*/
V8_WARN_UNUSED_RESULT bool FinishDynamicImportFailure(Local<Context> context,
Local<Value> exception);
Local<Value> GetModuleNamespace();
};
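A minimal sketch of driving the reworked module API above, assuming an already-compiled module and an entered context; the trivial resolver is illustrative and declares no imports:

#include "v8.h"

// Resolver matching the ResolveCallback typedef; this sketch has no module
// requests, so it never needs to produce a real module.
v8::MaybeLocal<v8::Module> ResolveStub(v8::Local<v8::Context> context,
                                       v8::Local<v8::String> specifier,
                                       v8::Local<v8::Module> referrer) {
  return v8::MaybeLocal<v8::Module>();
}

void RunModule(v8::Local<v8::Context> context, v8::Local<v8::Module> module) {
  // InstantiateModule now reports failure through an empty Maybe<bool>.
  if (!module->InstantiateModule(context, ResolveStub).FromMaybe(false)) return;
  v8::MaybeLocal<v8::Value> completion = module->Evaluate(context);
  if (module->GetStatus() == v8::Module::kEvaluated) {
    // Only valid once the module is instantiated and not errored.
    v8::Local<v8::Value> ns = module->GetModuleNamespace();
    (void)ns;
  }
  (void)completion;
}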
/**
@ -3053,12 +3097,9 @@ class V8_EXPORT Object : public Value {
//
// Note also that this only works for named properties.
V8_DEPRECATED("Use CreateDataProperty / DefineOwnProperty",
bool ForceSet(Local<Value> key, Local<Value> value,
PropertyAttribute attribs = None));
V8_DEPRECATE_SOON("Use CreateDataProperty / DefineOwnProperty",
Maybe<bool> ForceSet(Local<Context> context,
Local<Value> key, Local<Value> value,
PropertyAttribute attribs = None));
Maybe<bool> ForceSet(Local<Context> context, Local<Value> key,
Local<Value> value,
PropertyAttribute attribs = None));
V8_DEPRECATE_SOON("Use maybe version", Local<Value> Get(Local<Value> key));
V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
@ -4063,12 +4104,10 @@ class V8_EXPORT WasmCompiledModule : public Object {
// supports move semantics, and does not support copy semantics.
class TransferrableModule final {
public:
TransferrableModule(TransferrableModule&& src)
: compiled_code(std::move(src.compiled_code)),
wire_bytes(std::move(src.wire_bytes)) {}
TransferrableModule(TransferrableModule&& src) = default;
TransferrableModule(const TransferrableModule& src) = delete;
TransferrableModule& operator=(TransferrableModule&& src);
TransferrableModule& operator=(TransferrableModule&& src) = default;
TransferrableModule& operator=(const TransferrableModule& src) = delete;
private:
@ -4141,11 +4180,9 @@ class V8_EXPORT WasmModuleObjectBuilder final {
// Disable copy semantics *in this implementation*. We can choose to
// relax this, albeit it's not clear why.
WasmModuleObjectBuilder(const WasmModuleObjectBuilder&) = delete;
WasmModuleObjectBuilder(WasmModuleObjectBuilder&& src)
: received_buffers_(std::move(src.received_buffers_)),
total_size_(src.total_size_) {}
WasmModuleObjectBuilder(WasmModuleObjectBuilder&&) = default;
WasmModuleObjectBuilder& operator=(const WasmModuleObjectBuilder&) = delete;
WasmModuleObjectBuilder& operator=(WasmModuleObjectBuilder&&);
WasmModuleObjectBuilder& operator=(WasmModuleObjectBuilder&&) = default;
std::vector<Buffer> received_buffers_;
size_t total_size_ = 0;
@ -4251,7 +4288,18 @@ class V8_EXPORT ArrayBuffer : public Object {
*/
class V8_EXPORT Contents { // NOLINT
public:
Contents() : data_(NULL), byte_length_(0) {}
Contents()
: data_(nullptr),
byte_length_(0),
allocation_base_(nullptr),
allocation_length_(0),
allocation_mode_(Allocator::AllocationMode::kNormal) {}
void* AllocationBase() const { return allocation_base_; }
size_t AllocationLength() const { return allocation_length_; }
Allocator::AllocationMode AllocationMode() const {
return allocation_mode_;
}
void* Data() const { return data_; }
size_t ByteLength() const { return byte_length_; }
@ -4259,6 +4307,9 @@ class V8_EXPORT ArrayBuffer : public Object {
private:
void* data_;
size_t byte_length_;
void* allocation_base_;
size_t allocation_length_;
Allocator::AllocationMode allocation_mode_;
friend class ArrayBuffer;
};
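A sketch of how an embedder might consume the new allocation fields after externalizing a buffer; the three-argument Allocator::Free overload is assumed from the extended Allocator interface in this update:

#include "v8.h"

void ReleaseExternalized(v8::ArrayBuffer::Allocator* allocator,
                         v8::Local<v8::ArrayBuffer> buffer) {
  v8::ArrayBuffer::Contents contents = buffer->Externalize();
  // The allocation may be larger than the visible data, e.g. when the
  // backing store was set up as a page-aligned reservation.
  allocator->Free(contents.AllocationBase(), contents.AllocationLength(),
                  contents.AllocationMode());
}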
@ -4607,7 +4658,18 @@ class V8_EXPORT SharedArrayBuffer : public Object {
*/
class V8_EXPORT Contents { // NOLINT
public:
Contents() : data_(NULL), byte_length_(0) {}
Contents()
: data_(nullptr),
byte_length_(0),
allocation_base_(nullptr),
allocation_length_(0),
allocation_mode_(ArrayBuffer::Allocator::AllocationMode::kNormal) {}
void* AllocationBase() const { return allocation_base_; }
size_t AllocationLength() const { return allocation_length_; }
ArrayBuffer::Allocator::AllocationMode AllocationMode() const {
return allocation_mode_;
}
void* Data() const { return data_; }
size_t ByteLength() const { return byte_length_; }
@ -4615,6 +4677,9 @@ class V8_EXPORT SharedArrayBuffer : public Object {
private:
void* data_;
size_t byte_length_;
void* allocation_base_;
size_t allocation_length_;
ArrayBuffer::Allocator::AllocationMode allocation_mode_;
friend class SharedArrayBuffer;
};
@ -4861,6 +4926,7 @@ class V8_EXPORT External : public Value {
F(ArrayProto_forEach, array_for_each_iterator) \
F(ArrayProto_keys, array_keys_iterator) \
F(ArrayProto_values, array_values_iterator) \
F(ErrorPrototype, initial_error_prototype) \
F(IteratorPrototype, initial_iterator_prototype)
enum Intrinsic {
@ -5925,6 +5991,8 @@ V8_INLINE Local<Boolean> False(Isolate* isolate);
*
* The arguments for set_max_semi_space_size, set_max_old_space_size,
* set_max_executable_size, set_code_range_size specify limits in MB.
*
* The argument for set_max_semi_space_size_in_kb is in KB.
*/
class V8_EXPORT ResourceConstraints {
public:
@ -5942,10 +6010,28 @@ class V8_EXPORT ResourceConstraints {
void ConfigureDefaults(uint64_t physical_memory,
uint64_t virtual_memory_limit);
int max_semi_space_size() const { return max_semi_space_size_; }
void set_max_semi_space_size(int limit_in_mb) {
max_semi_space_size_ = limit_in_mb;
// Returns the max semi-space size in MB.
V8_DEPRECATE_SOON("Use max_semi_space_size_in_kb()",
int max_semi_space_size()) {
return static_cast<int>(max_semi_space_size_in_kb_ / 1024);
}
// Sets the max semi-space size in MB.
V8_DEPRECATE_SOON("Use set_max_semi_space_size_in_kb(size_t limit_in_kb)",
void set_max_semi_space_size(int limit_in_mb)) {
max_semi_space_size_in_kb_ = limit_in_mb * 1024;
}
// Returns the max semi-space size in KB.
size_t max_semi_space_size_in_kb() const {
return max_semi_space_size_in_kb_;
}
// Sets the max semi-space size in KB.
void set_max_semi_space_size_in_kb(size_t limit_in_kb) {
max_semi_space_size_in_kb_ = limit_in_kb;
}
int max_old_space_size() const { return max_old_space_size_; }
void set_max_old_space_size(int limit_in_mb) {
max_old_space_size_ = limit_in_mb;
@ -5971,7 +6057,10 @@ class V8_EXPORT ResourceConstraints {
}
private:
int max_semi_space_size_;
// max_semi_space_size_in_kb_ is in KB
size_t max_semi_space_size_in_kb_;
// The remaining limits are in MB
int max_old_space_size_;
int max_executable_size_;
uint32_t* stack_limit_;
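A short sketch of configuring the new KB-granular semi-space limit next to the unchanged MB-based setters, assuming the usual CreateParams-based isolate setup:

#include "v8.h"

v8::Isolate* NewIsolateWithSmallYoungGen(
    v8::ArrayBuffer::Allocator* allocator) {
  v8::Isolate::CreateParams params;
  params.array_buffer_allocator = allocator;
  // New: semi-space limit in KB (here 512 KB); the old set_max_semi_space_size
  // took MB and is now deprecated.
  params.constraints.set_max_semi_space_size_in_kb(512);
  params.constraints.set_max_old_space_size(256);  // still in MB
  return v8::Isolate::New(params);
}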
@ -6059,21 +6148,23 @@ typedef void (*DeprecatedCallCompletedCallback)();
/**
* HostImportModuleDynamicallyCallback is called when we require the
* embedder to load a module. This is used as part of the dynamic
* import syntax. The behavior of this callback is not specified in
* EcmaScript.
* import syntax.
*
* The referrer is the name of the file which calls the dynamic
* import. The referrer can be used to resolve the module location.
*
* The specifier is the name of the module that should be imported.
*
* The DynamicImportResult object is used to signal success or failure
* by calling its respective methods.
* The embedder must compile, instantiate, evaluate the Module, and
* obtain its namespace object.
*
* The Promise returned from this function is forwarded to userland
* JavaScript. The embedder must resolve this promise with the module
* namespace object. In case of an exception, the embedder must reject
* this promise with the exception.
*/
typedef void (*HostImportModuleDynamicallyCallback)(
Isolate* isolate, Local<String> referrer, Local<String> specifier,
Local<DynamicImportResult> result);
typedef MaybeLocal<Promise> (*HostImportModuleDynamicallyCallback)(
Local<Context> context, Local<String> referrer, Local<String> specifier);
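A hedged sketch of the new promise-returning embedder hook; the actual module loading is elided, and the resolver plumbing is illustrative:

#include "v8.h"

v8::MaybeLocal<v8::Promise> OnDynamicImport(v8::Local<v8::Context> context,
                                            v8::Local<v8::String> referrer,
                                            v8::Local<v8::String> specifier) {
  v8::Local<v8::Promise::Resolver> resolver;
  if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) {
    return v8::MaybeLocal<v8::Promise>();
  }
  // Compile, instantiate and evaluate the requested module here, then
  // resolve with module->GetModuleNamespace() or reject with the exception.
  return resolver->GetPromise();
}

// Registered via the SetHostImportModuleDynamicallyCallback method added
// further down in this diff:
// isolate->SetHostImportModuleDynamicallyCallback(OnDynamicImport);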
/**
* PromiseHook with type kInit is called when a new promise is
@ -6196,11 +6287,18 @@ typedef void (*FailedAccessCheckCallback)(Local<Object> target,
* Callback to check if code generation from strings is allowed. See
* Context::AllowCodeGenerationFromStrings.
*/
typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context);
typedef bool (*DeprecatedAllowCodeGenerationFromStringsCallback)(
Local<Context> context);
typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context,
Local<String> source);
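A sketch of the updated callback shape; the length-based policy is purely illustrative:

#include "v8.h"

bool AllowCodeGeneration(v8::Local<v8::Context> context,
                         v8::Local<v8::String> source) {
  // The new source parameter lets the embedder vet the actual string
  // handed to eval() or the Function constructor.
  return source->Length() < 1024;
}

// isolate->SetAllowCodeGenerationFromStringsCallback(AllowCodeGeneration);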
// --- WASM compilation callbacks ---
// --- WebAssembly compilation callbacks ---
typedef bool (*ExtensionCallback)(const FunctionCallbackInfo<Value>&);
// --- Callback for APIs defined on v8-supported objects, but implemented
// by the embedder. Example: WebAssembly.{compile|instantiate}Streaming ---
typedef void (*ApiImplementationCallback)(const FunctionCallbackInfo<Value>&);
// --- Garbage Collection Callbacks ---
/**
@ -6624,8 +6722,7 @@ class V8_EXPORT Isolate {
add_histogram_sample_callback(nullptr),
array_buffer_allocator(nullptr),
external_references(nullptr),
allow_atomics_wait(true),
host_import_module_dynamically_callback_(nullptr) {}
allow_atomics_wait(true) {}
/**
* The optional entry_hook allows the host application to provide the
@ -6688,16 +6785,6 @@ class V8_EXPORT Isolate {
* this isolate. This can also be configured via SetAllowAtomicsWait.
*/
bool allow_atomics_wait;
/**
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes. DO NOT USE.
*
* This specifies the callback called by the upcoming dynamic
* import() language feature to load modules.
*/
HostImportModuleDynamicallyCallback
host_import_module_dynamically_callback_;
};
@ -6836,6 +6923,7 @@ class V8_EXPORT Isolate {
kAssigmentExpressionLHSIsCallInSloppy = 36,
kAssigmentExpressionLHSIsCallInStrict = 37,
kPromiseConstructorReturnedUndefined = 38,
kConstructorNonUndefinedPrimitiveReturn = 39,
// If you add new values here, you'll also need to update Chromium's:
// UseCounter.h, V8PerIsolateData.cpp, histograms.xml
@ -6888,6 +6976,16 @@ class V8_EXPORT Isolate {
void SetAbortOnUncaughtExceptionCallback(
AbortOnUncaughtExceptionCallback callback);
/**
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes. DO NOT USE.
*
* This specifies the callback called by the upcoming dynamic
* import() language feature to load modules.
*/
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback);
/**
* Optional notification that the system is running low on memory.
* V8 uses these notifications to guide heuristics.
@ -7085,6 +7183,12 @@ class V8_EXPORT Isolate {
*/
Local<Context> GetEnteredOrMicrotaskContext();
/**
* Returns the Context that corresponds to the Incumbent realm in HTML spec.
* https://html.spec.whatwg.org/multipage/webappapis.html#incumbent
*/
Local<Context> GetIncumbentContext();
/**
* Schedules an exception to be thrown when returning to JavaScript. When an
* exception has been scheduled it is illegal to invoke any JavaScript
@ -7137,6 +7241,17 @@ class V8_EXPORT Isolate {
*/
void RemoveGCEpilogueCallback(GCCallback callback);
typedef size_t (*GetExternallyAllocatedMemoryInBytesCallback)();
/**
* Set the callback that tells V8 how much memory is currently allocated
* external to the V8 heap. Ideally this memory is somehow connected to V8
* objects and may get freed up when the corresponding V8 objects get
* collected by a V8 garbage collection.
*/
void SetGetExternallyAllocatedMemoryInBytesCallback(
GetExternallyAllocatedMemoryInBytesCallback callback);
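A sketch, assuming the embedder tracks its external allocations in a counter of its own; the counter name is hypothetical:

#include "v8.h"

#include <atomic>

// Hypothetical embedder-side counter, updated wherever external buffers
// tied to V8 objects are allocated and freed.
static std::atomic<size_t> g_external_bytes{0};

size_t GetExternallyAllocatedBytes() { return g_external_bytes.load(); }

// isolate->SetGetExternallyAllocatedMemoryInBytesCallback(
//     GetExternallyAllocatedBytes);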
/**
* Forcefully terminate the current thread of JavaScript execution
* in the given isolate.
@ -7452,14 +7567,18 @@ class V8_EXPORT Isolate {
*/
void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback callback);
V8_DEPRECATED("Use callback with source parameter.",
void SetAllowCodeGenerationFromStringsCallback(
DeprecatedAllowCodeGenerationFromStringsCallback callback));
/**
* Embedder over{ride|load} injection points for wasm APIs.
* Embedder over{ride|load} injection points for wasm APIs. The expectation
* is that the embedder sets them at most once.
*/
void SetWasmModuleCallback(ExtensionCallback callback);
void SetWasmCompileCallback(ExtensionCallback callback);
void SetWasmInstanceCallback(ExtensionCallback callback);
void SetWasmInstantiateCallback(ExtensionCallback callback);
void SetWasmCompileStreamingCallback(ApiImplementationCallback callback);
/**
* Check if V8 is dead and therefore unusable. This is the case after
@ -7556,14 +7675,19 @@ class V8_EXPORT Isolate {
~Isolate() = delete;
Isolate(const Isolate&) = delete;
Isolate& operator=(const Isolate&) = delete;
// Deleting operator new and delete here is allowed as ctor and dtor are also
// deleted.
void* operator new(size_t size) = delete;
void* operator new[](size_t size) = delete;
void operator delete(void*, size_t) = delete;
void operator delete[](void*, size_t) = delete;
private:
template <class K, class V, class Traits>
friend class PersistentValueMapBase;
void ReportExternalAllocationLimitReached();
void CheckMemoryPressure();
};
class V8_EXPORT StartupData {
@ -7611,8 +7735,9 @@ class V8_EXPORT V8 {
* strings should be allowed.
*/
V8_INLINE static V8_DEPRECATED(
"Use isolate version", void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback that));
"Use isolate version",
void SetAllowCodeGenerationFromStringsCallback(
DeprecatedAllowCodeGenerationFromStringsCallback that));
/**
* Check if V8 is dead and therefore unusable. This is the case after
@ -7923,7 +8048,7 @@ class V8_EXPORT V8 {
*/
static void ShutdownPlatform();
#if V8_OS_LINUX && V8_TARGET_ARCH_X64 && !V8_OS_ANDROID
#if V8_OS_POSIX
/**
* Give the V8 signal handler a chance to handle a fault.
*
@ -7944,7 +8069,7 @@ class V8_EXPORT V8 {
* points to a ucontext_t structure.
*/
static bool TryHandleSignal(int signal_number, void* info, void* context);
#endif // V8_OS_LINUX
#endif // V8_OS_POSIX
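A sketch of chaining V8 into an existing POSIX fault handler; installing the handler via sigaction with SA_SIGINFO is left to the embedder:

#include <signal.h>

#include "v8.h"

void FaultHandler(int signal_number, siginfo_t* info, void* context) {
  // Give V8 first crack at the fault; it returns true if this was a
  // recoverable trap (e.g. a WebAssembly out-of-bounds access).
  if (v8::V8::TryHandleSignal(signal_number, info, context)) return;
  // Not V8's fault: restore the default action and re-raise.
  signal(signal_number, SIG_DFL);
  raise(signal_number);
}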
/**
* Enable the default signal handler rather than using one provided by the
@ -8283,10 +8408,15 @@ class V8_EXPORT TryCatch {
TryCatch(const TryCatch&) = delete;
void operator=(const TryCatch&) = delete;
void* operator new(size_t size);
void operator delete(void*, size_t);
private:
// Declaring operator new and delete as deleted is not spec compliant.
// Therefore declare them private instead to disable dynamic allocation.
void* operator new(size_t size);
void* operator new[](size_t size);
void operator delete(void*, size_t);
void operator delete[](void*, size_t);
void ResetInternal();
internal::Isolate* isolate_;
@ -8539,6 +8669,27 @@ class V8_EXPORT Context {
Local<Context> context_;
};
/**
* Stack-allocated class to support the backup incumbent settings object
* stack.
* https://html.spec.whatwg.org/multipage/webappapis.html#backup-incumbent-settings-object-stack
*/
class BackupIncumbentScope {
public:
/**
* |backup_incumbent_context| is pushed onto the backup incumbent settings
* object stack.
*/
explicit BackupIncumbentScope(Local<Context> backup_incumbent_context);
~BackupIncumbentScope();
private:
friend class internal::Isolate;
Local<Context> backup_incumbent_context_;
const BackupIncumbentScope* prev_ = nullptr;
};
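A sketch of pushing a backup incumbent context around a callback invocation; backup_context and fn are illustrative stand-ins for embedder state:

#include "v8.h"

void CallWithIncumbent(v8::Local<v8::Context> current,
                       v8::Local<v8::Context> backup_context,
                       v8::Local<v8::Function> fn) {
  // While the scope is alive, isolate->GetIncumbentContext() falls back to
  // backup_context when no better incumbent is on the stack.
  v8::Context::BackupIncumbentScope incumbent_scope(backup_context);
  v8::Local<v8::Value> result;
  if (!fn->Call(current, current->Global(), 0, nullptr).ToLocal(&result)) {
    // An exception is pending; handle it in embedder code.
  }
}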
private:
friend class Value;
friend class Script;
@ -8786,6 +8937,8 @@ class Internals {
static const int kExternalMemoryOffset = 4 * kApiPointerSize;
static const int kExternalMemoryLimitOffset =
kExternalMemoryOffset + kApiInt64Size;
static const int kExternalMemoryAtLastMarkCompactOffset =
kExternalMemoryLimitOffset + kApiInt64Size;
static const int kIsolateRootsOffset = kExternalMemoryLimitOffset +
kApiInt64Size + kApiInt64Size +
kApiPointerSize + kApiPointerSize;
@ -10004,13 +10157,32 @@ uint32_t Isolate::GetNumberOfDataSlots() {
int64_t Isolate::AdjustAmountOfExternalAllocatedMemory(
int64_t change_in_bytes) {
typedef internal::Internals I;
const int64_t kMemoryReducerActivationLimit = 32 * 1024 * 1024;
int64_t* external_memory = reinterpret_cast<int64_t*>(
reinterpret_cast<uint8_t*>(this) + I::kExternalMemoryOffset);
const int64_t external_memory_limit = *reinterpret_cast<int64_t*>(
int64_t* external_memory_limit = reinterpret_cast<int64_t*>(
reinterpret_cast<uint8_t*>(this) + I::kExternalMemoryLimitOffset);
int64_t* external_memory_at_last_mc =
reinterpret_cast<int64_t*>(reinterpret_cast<uint8_t*>(this) +
I::kExternalMemoryAtLastMarkCompactOffset);
const int64_t amount = *external_memory + change_in_bytes;
*external_memory = amount;
if (change_in_bytes > 0 && amount > external_memory_limit) {
int64_t allocation_diff_since_last_mc =
*external_memory_at_last_mc - *external_memory;
allocation_diff_since_last_mc = allocation_diff_since_last_mc < 0
? -allocation_diff_since_last_mc
: allocation_diff_since_last_mc;
if (allocation_diff_since_last_mc > kMemoryReducerActivationLimit) {
CheckMemoryPressure();
}
if (change_in_bytes < 0) {
*external_memory_limit += change_in_bytes;
}
if (change_in_bytes > 0 && amount > *external_memory_limit) {
ReportExternalAllocationLimitReached();
}
return *external_memory;
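For context, a typical embedder call pattern that exercises the new pressure check; the buffer sizes are arbitrary:

#include "v8.h"

void TrackExternalBuffer(v8::Isolate* isolate, int64_t size_in_bytes) {
  // Large positive deltas may now also trigger CheckMemoryPressure() once
  // allocations since the last mark-compact exceed the 32 MB activation
  // limit above.
  isolate->AdjustAmountOfExternalAllocatedMemory(size_in_bytes);
}

void UntrackExternalBuffer(v8::Isolate* isolate, int64_t size_in_bytes) {
  // Negative deltas lower the external memory limit as well.
  isolate->AdjustAmountOfExternalAllocatedMemory(-size_in_bytes);
}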
@ -10040,11 +10212,11 @@ void* Context::GetAlignedPointerFromEmbedderData(int index) {
#endif
}
void V8::SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback callback) {
DeprecatedAllowCodeGenerationFromStringsCallback callback) {
Isolate* isolate = Isolate::GetCurrent();
isolate->SetAllowCodeGenerationFromStringsCallback(callback);
isolate->SetAllowCodeGenerationFromStringsCallback(
reinterpret_cast<AllowCodeGenerationFromStringsCallback>(callback));
}


@ -61,6 +61,7 @@
// V8_OS_CYGWIN - Cygwin
// V8_OS_DRAGONFLYBSD - DragonFlyBSD
// V8_OS_FREEBSD - FreeBSD
// V8_OS_FUCHSIA - Fuchsia
// V8_OS_LINUX - Linux
// V8_OS_MACOSX - Mac OS X
// V8_OS_NETBSD - NetBSD
@ -95,6 +96,9 @@
# define V8_OS_BSD 1
# define V8_OS_FREEBSD 1
# define V8_OS_POSIX 1
#elif defined(__Fuchsia__)
# define V8_OS_FUCHSIA 1
# define V8_OS_POSIX 1
#elif defined(__DragonFly__)
# define V8_OS_BSD 1
# define V8_OS_DRAGONFLYBSD 1
@ -169,7 +173,6 @@
// supported
// V8_HAS_ATTRIBUTE_DEPRECATED - __attribute__((deprecated)) supported
// V8_HAS_ATTRIBUTE_NOINLINE - __attribute__((noinline)) supported
// V8_HAS_ATTRIBUTE_NORETURN - __attribute__((noreturn)) supported
// V8_HAS_ATTRIBUTE_UNUSED - __attribute__((unused)) supported
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
@ -209,7 +212,6 @@
# define V8_HAS_ATTRIBUTE_ALWAYS_INLINE (__has_attribute(always_inline))
# define V8_HAS_ATTRIBUTE_DEPRECATED (__has_attribute(deprecated))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
# define V8_HAS_ATTRIBUTE_NORETURN (__has_attribute(noreturn))
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
@ -250,7 +252,6 @@
# define V8_HAS_ATTRIBUTE_DEPRECATED (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE (V8_GNUC_PREREQ(4, 5, 0))
# define V8_HAS_ATTRIBUTE_NOINLINE (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_ATTRIBUTE_NORETURN (V8_GNUC_PREREQ(2, 5, 0))
# define V8_HAS_ATTRIBUTE_UNUSED (V8_GNUC_PREREQ(2, 95, 0))
# define V8_HAS_ATTRIBUTE_VISIBILITY (V8_GNUC_PREREQ(4, 3, 0))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
@ -311,18 +312,6 @@
#endif
// A macro used to tell the compiler that a particular function never returns.
// Use like:
// V8_NORETURN void MyAbort() { abort(); }
#if V8_HAS_ATTRIBUTE_NORETURN
# define V8_NORETURN __attribute__((noreturn))
#elif V8_HAS_DECLSPEC_NORETURN
# define V8_NORETURN __declspec(noreturn)
#else
# define V8_NORETURN /* NOT SUPPORTED */
#endif
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
#if defined(V8_DEPRECATION_WARNINGS) && V8_HAS_ATTRIBUTE_DEPRECATED_MESSAGE
#define V8_DEPRECATED(message, declarator) \


@ -9,6 +9,9 @@
# Bots are ordered by appearance on waterfall.
'masters': {
'developer_default': {
'android.arm.debug': 'default_debug_android_arm',
'android.arm.optdebug': 'default_optdebug_android_arm',
'android.arm.release': 'default_release_android_arm',
'arm.debug': 'default_debug_arm',
'arm.optdebug': 'default_optdebug_arm',
'arm.release': 'default_release_arm',
@ -42,12 +45,12 @@
},
'client.dart.fyi': {
'v8-linux-release': 'gyp_release_x86_disassembler',
'v8-win-release': 'gyp_release_x86_disassembler',
'v8-mac-release': 'gyp_release_x86_disassembler',
'v8-linux-release': 'gn_release_x86_disassembler',
'v8-win-release': 'gn_release_x86_disassembler',
'v8-mac-release': 'gn_release_x86_disassembler',
},
'client.dynamorio': {
'linux-v8-dr': 'gyp_release_x64',
'linux-v8-dr': 'gn_release_x64',
},
'client.v8': {
# Linux.
@ -60,6 +63,7 @@
'V8 Linux - verify csa': 'gn_release_x86_verify_csa',
# Linux64.
'V8 Linux64 - builder': 'gn_release_x64_valgrind',
'V8 Linux64 - concurrent marking - builder': 'gn_release_x64_concurrent_marking',
'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind',
'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom',
'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
@ -87,8 +91,23 @@
'V8 Linux64 TSAN - concurrent marking':
'gn_release_x64_tsan_concurrent_marking',
'V8 Linux - arm64 - sim - MSAN': 'gn_release_simulate_arm64_msan',
# Clusterfuzz.
# Misc.
'V8 Linux gcc 4.8': 'gn_release_x86_gcc',
'V8 Linux64 gcc 4.8 - debug': 'gn_debug_x64_gcc',
# FYI.
'V8 Linux - swarming staging': 'gn_release_x64',
'V8 Linux64 - cfi': 'gn_release_x64_cfi',
'V8 Linux64 UBSanVptr': 'gn_release_x64_ubsan_vptr',
'V8 Linux - vtunejit': 'gn_debug_x86_vtunejit',
'V8 Linux64 - gcov coverage': 'gn_release_x64_gcc_coverage',
'V8 Linux - predictable': 'gn_release_x86_predictable',
'V8 Linux - full debug': 'gn_full_debug_x86',
'V8 Linux - interpreted regexp': 'gn_release_x86_interpreted_regexp',
'V8 Random Deopt Fuzzer - debug': 'gn_debug_x86',
},
'client.v8.clusterfuzz': {
'V8 Linux64 - release builder': 'gn_release_x64_correctness_fuzzer',
'V8 Linux64 - debug builder': 'gn_debug_x64',
'V8 Linux64 ASAN no inline - release builder':
'gn_release_x64_asan_symbolized_edge_verify_heap',
'V8 Linux64 ASAN - debug builder': 'gn_debug_x64_asan_edge',
@ -98,23 +117,12 @@
'gn_debug_simulate_arm_asan_edge',
'V8 Linux ASAN mipsel - debug builder':
'gn_debug_simulate_mipsel_asan_edge',
# Misc.
'V8 Linux gcc 4.8': 'gn_release_x86_gcc',
'V8 Linux64 gcc 4.8 - debug': 'gn_debug_x64_gcc',
# FYI.
'V8 Linux - swarming staging': 'gn_release_x64',
# TODO(machenbach): Figure out if symbolized is still needed. The
# original config also specified -O1, which we dropped because chromium
# doesn't have it (anymore).
'V8 Linux64 - cfi': 'gyp_release_x64_cfi_symbolized',
'V8 Linux - vtunejit': 'gn_debug_x86_vtunejit',
'V8 Linux64 - gcov coverage': 'gyp_release_x64_gcc_coverage',
'V8 Linux - predictable': 'gn_release_x86_predictable',
'V8 Linux - full debug': 'gyp_full_debug_x86',
'V8 Linux - interpreted regexp': 'gn_release_x86_interpreted_regexp',
'V8 Random Deopt Fuzzer - debug': 'gyp_debug_x86',
'V8 Linux64 CFI - release builder': 'gn_release_x64_cfi_clusterfuzz',
'V8 Linux MSAN no origins':
'gn_release_simulate_arm64_msan_no_origins_edge',
'V8 Linux MSAN chained origins':
'gn_release_simulate_arm64_msan_edge',
},
'client.v8.ports': {
# Arm.
'V8 Arm - builder': 'gn_release_arm',
@ -139,8 +147,6 @@
# S390.
'V8 Linux - s390 - sim': 'gyp_release_simulate_s390',
'V8 Linux - s390x - sim': 'gyp_release_simulate_s390x',
# X87.
'V8 Linux - x87 - nosnap - debug builder': 'gyp_debug_simulate_x87',
},
'client.v8.branches': {
'V8 Linux - beta branch': 'gn_release_x86',
@ -189,10 +195,11 @@
'v8_linux64_asan_rel_ng': 'gn_release_x64_asan_minimal_symbols',
'v8_linux64_msan_rel': 'gn_release_simulate_arm64_msan_minimal_symbols',
'v8_linux64_sanitizer_coverage_rel':
'gyp_release_x64_asan_minimal_symbols_coverage',
'gn_release_x64_asan_minimal_symbols_coverage',
'v8_linux64_tsan_rel': 'gn_release_x64_tsan_minimal_symbols',
'v8_linux64_tsan_concurrent_marking_rel_ng':
'gn_release_x64_tsan_concurrent_marking_minimal_symbols',
'v8_linux64_ubsan_rel_ng': 'gn_release_x64_ubsan_vptr_minimal_symbols',
'v8_win_dbg': 'gn_debug_x86_trybot',
'v8_win_compile_dbg': 'gn_debug_x86_trybot',
'v8_win_rel_ng': 'gn_release_x86_trybot',
@ -231,6 +238,14 @@
'gn', 'debug', 'simulate_arm', 'v8_enable_slow_dchecks'],
'default_release_arm': [
'gn', 'release', 'simulate_arm'],
'default_debug_android_arm': [
'gn', 'debug', 'arm', 'android', 'crosscompile',
'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_android_arm': [
'gn', 'debug', 'arm', 'android', 'crosscompile',
'v8_enable_slow_dchecks' ],
'default_release_android_arm': [
'gn', 'release', 'arm', 'android', 'crosscompile'],
'default_debug_arm64': [
'gn', 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks',
'v8_full_debug'],
@ -321,6 +336,10 @@
'gn_release_simulate_arm64_msan_minimal_symbols': [
'gn', 'release_bot', 'simulate_arm64', 'msan', 'minimal_symbols',
'swarming'],
'gn_release_simulate_arm64_msan_edge': [
'gn', 'release_bot', 'simulate_arm64', 'edge', 'msan'],
'gn_release_simulate_arm64_msan_no_origins_edge': [
'gn', 'release_bot', 'simulate_arm64', 'edge', 'msan_no_origins'],
'gn_release_simulate_arm64_trybot': [
'gn', 'release_trybot', 'simulate_arm64', 'swarming'],
'gn_release_simulate_mipsel': [
@ -330,7 +349,8 @@
# GN debug configs for arm.
'gn_debug_arm': [
'gn', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
'gn', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming',
'no_custom_libcxx'],
# GN release configs for arm.
'gn_release_arm': [
@ -350,15 +370,26 @@
'gn_release_x64_asan_minimal_symbols': [
'gn', 'release_bot', 'x64', 'asan', 'lsan', 'minimal_symbols',
'swarming'],
'gn_release_x64_asan_minimal_symbols_coverage': [
'gn', 'release_bot', 'x64', 'asan', 'bb', 'coverage', 'lsan',
'minimal_symbols', 'swarming'],
'gn_release_x64_asan_no_lsan': [
'gn', 'release_bot', 'x64', 'asan', 'swarming'],
'gn_release_x64_asan_symbolized_edge_verify_heap': [
'gn', 'release_bot', 'x64', 'asan', 'edge', 'lsan', 'symbolized',
'v8_verify_heap'],
'gn_release_x64_cfi': [
'gn', 'release_bot', 'x64', 'cfi', 'swarming'],
'gn_release_x64_cfi_clusterfuzz': [
'gn', 'release_bot', 'x64', 'cfi_clusterfuzz'],
'gn_release_x64_clang': [
'gn', 'release_bot', 'x64', 'clang', 'swarming'],
'gn_release_x64_concurrent_marking': [
'gn', 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'swarming'],
'gn_release_x64_correctness_fuzzer' : [
'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer'],
'gn_release_x64_gcc_coverage': [
'gn', 'release_bot', 'x64', 'coverage', 'gcc', 'no_custom_libcxx'],
'gn_release_x64_internal': [
'gn', 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'],
'gn_release_x64_minimal_symbols': [
@ -375,10 +406,16 @@
'minimal_symbols', 'swarming'],
'gn_release_x64_tsan_minimal_symbols': [
'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
'gn_release_x64_ubsan_vptr': [
'gn', 'release_bot', 'x64', 'ubsan_vptr'],
'gn_release_x64_ubsan_vptr_minimal_symbols': [
'gn', 'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols'],
'gn_release_x64_valgrind': [
'gn', 'release_bot', 'x64', 'swarming', 'valgrind'],
'gn', 'release_bot', 'x64', 'swarming', 'valgrind',
'no_custom_libcxx'],
'gn_release_x64_valgrind_trybot': [
'gn', 'release_trybot', 'x64', 'swarming', 'valgrind'],
'gn', 'release_trybot', 'x64', 'swarming', 'valgrind',
'no_custom_libcxx'],
'gn_release_x64_verify_csa': [
'gn', 'release_bot', 'x64', 'swarming', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
@ -391,13 +428,14 @@
'gn_debug_x64_custom': [
'gn', 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'],
'gn_debug_x64_gcc': [
'gn', 'debug_bot', 'x64', 'gcc'],
'gn', 'debug_bot', 'x64', 'gcc', 'no_custom_libcxx'],
'gn_debug_x64_minimal_symbols': [
'gn', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
'gn_debug_x64_trybot': [
'gn', 'debug_trybot', 'x64', 'swarming'],
'gn_debug_x64_valgrind': [
'gn', 'debug_bot', 'x64', 'swarming', 'valgrind'],
'gn', 'debug_bot', 'x64', 'swarming', 'valgrind',
'no_custom_libcxx'],
# GN debug configs for x86.
'gn_debug_x86': [
@ -414,14 +452,20 @@
'gn', 'debug_trybot', 'x86', 'swarming'],
'gn_debug_x86_vtunejit': [
'gn', 'debug_bot', 'x86', 'v8_enable_vtunejit'],
'gn_full_debug_x86': [
'gn', 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks',
'v8_full_debug'],
# GN release configs for x86.
'gn_release_x86': [
'gn', 'release_bot', 'x86', 'swarming'],
'gn_release_x86_disassembler': [
'gn', 'release_bot', 'x86', 'v8_enable_disassembler'],
'gn_release_x86_gcc': [
'gn', 'release_bot', 'x86', 'gcc'],
'gn', 'release_bot', 'x86', 'gcc', 'no_custom_libcxx'],
'gn_release_x86_gcc_minimal_symbols': [
'gn', 'release_bot', 'x86', 'gcc', 'minimal_symbols'],
'gn', 'release_bot', 'x86', 'gcc', 'minimal_symbols',
'no_custom_libcxx'],
'gn_release_x86_gcmole': [
'gn', 'release_bot', 'x86', 'gcmole', 'swarming'],
'gn_release_x86_gcmole_trybot': [
@ -449,17 +493,6 @@
'gn', 'release_bot', 'x86', 'swarming', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
# Gyp debug configs for simulators.
'gyp_debug_simulate_x87': [
'gyp', 'debug_bot_static', 'simulate_x87', 'swarming'],
# Gyp debug configs for x86.
'gyp_debug_x86': [
'gyp', 'debug_bot', 'x86', 'swarming'],
'gyp_full_debug_x86': [
'gyp', 'debug', 'x86', 'goma', 'static', 'v8_enable_slow_dchecks',
'v8_full_debug'],
# Gyp release configs for mips.
'gyp_release_mips_no_snap_no_i18n': [
'gyp', 'release', 'mips', 'crosscompile', 'static', 'v8_no_i18n',
@ -478,17 +511,6 @@
# Gyp release configs for x64.
'gyp_release_x64': [
'gyp', 'release_bot', 'x64', 'swarming'],
'gyp_release_x64_asan_minimal_symbols_coverage': [
'gyp', 'release_bot', 'x64', 'asan', 'bb', 'coverage', 'lsan',
'minimal_symbols', 'swarming'],
'gyp_release_x64_cfi_symbolized': [
'gyp', 'release_bot', 'x64', 'cfi', 'swarming', 'symbolized'],
'gyp_release_x64_gcc_coverage': [
'gyp', 'release_bot', 'x64', 'coverage', 'gcc'],
# Gyp release configs for x86.
'gyp_release_x86_disassembler': [
'gyp', 'release_bot', 'x86', 'v8_enable_disassembler'],
},
'mixins': {
@ -518,7 +540,14 @@
},
'cfi': {
'gn_args': 'is_cfi=true use_cfi_diag=true',
'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true '
'use_cfi_recover=false'),
'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
},
'cfi_clusterfuzz': {
'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_diag=true '
'use_cfi_recover=true'),
'gyp_defines': 'cfi_vptr=1 cfi_diag=1',
},
@ -528,7 +557,7 @@
},
'coverage': {
# TODO(machenbach): Add this to gn.
'gn_args': 'v8_code_coverage=true',
'gyp_defines': 'coverage=1',
},
@ -552,12 +581,6 @@
'v8_optimized_debug'],
},
'debug_bot_static': {
'mixins': [
'debug', 'static', 'goma', 'v8_enable_slow_dchecks',
'v8_optimized_debug'],
},
'debug_trybot': {
'mixins': ['debug_bot', 'minimal_symbols'],
},
@ -611,8 +634,16 @@
'msan': {
'gn_args': ('is_msan=true msan_track_origins=2 '
'use_prebuilt_instrumented_libraries=true'),
'gyp_defines': ('clang=1 msan=1 msan_track_origins=2 '
'use_prebuilt_instrumented_libraries=1'),
},
'msan_no_origins': {
'gn_args': ('is_msan=true msan_track_origins=0 '
'use_prebuilt_instrumented_libraries=true'),
},
# TODO(machenbach): Remove when http://crbug.com/738814 is resolved.
'no_custom_libcxx': {
'gn_args': 'use_custom_libcxx=false',
},
'release': {
@ -673,11 +704,6 @@
'gyp_defines': 'target_arch=x64 v8_target_arch=s390x',
},
'simulate_x87': {
'gn_args': 'target_cpu="x86" v8_target_cpu="x87"',
'gyp_defines': 'target_arch=ia32 v8_target_arch=x87',
},
'static': {
'gn_args': 'is_component_build=false',
'gyp_defines': 'component=static_library',
@ -700,6 +726,13 @@
'gyp_defines': 'clang=1 tsan=1',
},
'ubsan_vptr': {
# TODO(krasin): Remove is_ubsan_no_recover=true when
# https://llvm.org/bugs/show_bug.cgi?id=25569 is fixed and just use
# ubsan_vptr instead.
'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=true',
},
'valgrind': {
'gn_args': 'v8_has_valgrind=true',
'gyp_defines': 'has_valgrind=1',


@ -76,9 +76,11 @@ if (v8_snapshot_toolchain == "") {
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
_cpus = v8_current_cpu
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") {
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "mips64") {
_cpus = "x64_v8_${v8_current_cpu}"
} else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel") {
} else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel" ||
v8_current_cpu == "mips") {
_cpus = "x86_v8_${v8_current_cpu}"
} else {
# This branch should not be reached; leave _cpus blank so the assert

2
deps/v8/src/OWNERS vendored

@ -3,3 +3,5 @@ per-file intl.*=mnita@google.com
per-file intl.*=jshin@chromium.org
per-file typing-asm.*=aseemgarg@chromium.org
per-file typing-asm.*=bradnelson@chromium.org
# COMPONENT: Blink>JavaScript>Runtime

29
deps/v8/src/PRESUBMIT.py vendored Normal file

@ -0,0 +1,29 @@
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for //v8/src
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import os
def PostUploadHook(cl, change, output_api):
"""git cl upload will call this hook after the issue is created/modified.
This hook adds extra try bots to the CL description in order to run layout
tests in addition to CQ try bots.
"""
def is_api_cc(f):
return 'api.cc' == os.path.split(f.LocalPath())[1]
if not change.AffectedFiles(file_filter=is_api_cc):
return []
return output_api.EnsureCQIncludeTrybotsAreAdded(
cl,
[
'master.tryserver.chromium.linux:linux_chromium_rel_ng'
],
'Automatically added layout test trybots to run tests on CQ.')


@ -649,11 +649,7 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
Handle<SharedFunctionInfo> shared(
SharedFunctionInfo::cast(script->eval_from_shared()));
// Find the name of the function calling eval.
if (!shared->name()->IsUndefined(isolate)) {
result = Handle<Object>(shared->name(), isolate);
} else {
result = Handle<Object>(shared->inferred_name(), isolate);
}
result = Handle<Object>(shared->name(), isolate);
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}


@ -20,6 +20,8 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
if (!root->IsHeapObject()) continue;
// Omit root entries that can be written after initialization. They must
// not be referenced through the root list in the snapshot.
// Since we map the raw address of a root item to its root list index, the
// raw address must be constant, i.e. the object must be immovable.
if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
HeapObject* heap_object = HeapObject::cast(root);
Maybe<uint32_t> maybe_index = map_->Get(heap_object);


@ -1,83 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/allocation-site-scopes.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
Handle<AllocationSite> AllocationSiteCreationContext::EnterNewScope() {
Handle<AllocationSite> scope_site;
if (top().is_null()) {
// We are creating the top level AllocationSite as opposed to a nested
// AllocationSite.
InitializeTraversal(isolate()->factory()->NewAllocationSite());
scope_site = Handle<AllocationSite>(*top(), isolate());
if (FLAG_trace_creation_allocation_sites) {
PrintF("*** Creating top level AllocationSite %p\n",
static_cast<void*>(*scope_site));
}
} else {
DCHECK(!current().is_null());
scope_site = isolate()->factory()->NewAllocationSite();
if (FLAG_trace_creation_allocation_sites) {
PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
static_cast<void*>(*top()),
static_cast<void*>(*current()),
static_cast<void*>(*scope_site));
}
current()->set_nested_site(*scope_site);
update_current_site(*scope_site);
}
DCHECK(!scope_site.is_null());
return scope_site;
}
void AllocationSiteCreationContext::ExitScope(
Handle<AllocationSite> scope_site,
Handle<JSObject> object) {
if (!object.is_null()) {
bool top_level = !scope_site.is_null() &&
top().is_identical_to(scope_site);
scope_site->set_transition_info(*object);
if (FLAG_trace_creation_allocation_sites) {
if (top_level) {
PrintF("*** Setting AllocationSite %p transition_info %p\n",
static_cast<void*>(*scope_site),
static_cast<void*>(*object));
} else {
PrintF("Setting AllocationSite (%p, %p) transition_info %p\n",
static_cast<void*>(*top()),
static_cast<void*>(*scope_site),
static_cast<void*>(*object));
}
}
}
}
bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
if (FLAG_allocation_site_pretenuring ||
AllocationSite::GetMode(object->GetElementsKind()) ==
TRACK_ALLOCATION_SITE) {
if (FLAG_trace_creation_allocation_sites) {
PrintF("*** Creating Memento for %s %p\n",
object->IsJSArray() ? "JSArray" : "JSObject",
static_cast<void*>(*object));
}
return true;
}
}
return false;
}
} // namespace internal
} // namespace v8


@ -7,11 +7,11 @@
#include "src/handles.h"
#include "src/objects.h"
#include "src/objects/map.h"
namespace v8 {
namespace internal {
// AllocationSiteContext is the base class for walking and copying a nested
// boilerplate with AllocationSite and AllocationMemento support.
class AllocationSiteContext {
@ -34,6 +34,8 @@ class AllocationSiteContext {
void InitializeTraversal(Handle<AllocationSite> site) {
top_ = site;
// {current_} is updated in place to not create unnecessary Handles, hence
// we initially need a separate handle.
current_ = Handle<AllocationSite>::New(*top_, isolate());
}
@ -44,18 +46,6 @@ class AllocationSiteContext {
};
// AllocationSiteCreationContext aids in the creation of AllocationSites to
// accompany object literals.
class AllocationSiteCreationContext : public AllocationSiteContext {
public:
explicit AllocationSiteCreationContext(Isolate* isolate)
: AllocationSiteContext(isolate) { }
Handle<AllocationSite> EnterNewScope();
void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object);
};
// AllocationSiteUsageContext aids in the creation of AllocationMementos placed
// behind some/all components of a copied object literal.
class AllocationSiteUsageContext : public AllocationSiteContext {
@ -82,10 +72,26 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
Handle<JSObject> object) {
// This assert ensures that we are pointing at the right sub-object in a
// recursive walk of a nested literal.
DCHECK(object.is_null() || *object == scope_site->transition_info());
DCHECK(object.is_null() || *object == scope_site->boilerplate());
}
bool ShouldCreateMemento(Handle<JSObject> object);
bool ShouldCreateMemento(Handle<JSObject> object) {
if (activated_ &&
AllocationSite::CanTrack(object->map()->instance_type())) {
if (FLAG_allocation_site_pretenuring ||
AllocationSite::ShouldTrack(object->GetElementsKind())) {
if (FLAG_trace_creation_allocation_sites) {
PrintF("*** Creating Memento for %s %p\n",
object->IsJSArray() ? "JSArray" : "JSObject",
static_cast<void*>(*object));
}
return true;
}
}
return false;
}
static const bool kCopying = true;
private:
Handle<AllocationSite> top_site_;


@ -53,7 +53,7 @@ char* StrNDup(const char* str, int n) {
void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_LE(V8_ALIGNOF(void*), alignment);
DCHECK(base::bits::IsPowerOfTwo64(alignment));
DCHECK(base::bits::IsPowerOfTwo(alignment));
void* ptr;
#if V8_OS_WIN
ptr = _aligned_malloc(size, alignment);


@ -39,15 +39,16 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
bool is_hidden_prototype,
bool is_prototype);
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
Handle<Name> name = Handle<Name>());
MaybeHandle<JSFunction> InstantiateFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> data,
MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
MaybeHandle<Object> Instantiate(Isolate* isolate, Handle<Object> data,
Handle<Name> name = Handle<Name>()) {
MaybeHandle<Object> Instantiate(
Isolate* isolate, Handle<Object> data,
MaybeHandle<Name> maybe_name = MaybeHandle<Name>()) {
if (data->IsFunctionTemplateInfo()) {
return InstantiateFunction(isolate,
Handle<FunctionTemplateInfo>::cast(data), name);
return InstantiateFunction(
isolate, Handle<FunctionTemplateInfo>::cast(data), maybe_name);
} else if (data->IsObjectTemplateInfo()) {
return InstantiateObject(isolate, Handle<ObjectTemplateInfo>::cast(data),
Handle<JSReceiver>(), false, false);
@ -250,7 +251,7 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
DCHECK_EQ(kData, details.kind());
v8::Intrinsic intrinsic =
static_cast<v8::Intrinsic>(Smi::cast(properties->get(i++))->value());
static_cast<v8::Intrinsic>(Smi::ToInt(properties->get(i++)));
auto prop_data = handle(GetIntrinsic(isolate, intrinsic), isolate);
RETURN_ON_EXCEPTION(isolate, DefineDataProperty(isolate, obj, name,
@ -311,7 +312,7 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
Handle<UnseededNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
auto new_cache =
UnseededNumberDictionary::AtNumberPut(cache, serial_number, object);
UnseededNumberDictionary::Set(cache, serial_number, object);
if (*new_cache != *cache) {
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
@ -333,14 +334,9 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
Handle<UnseededNumberDictionary> cache =
isolate->slow_template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
DCHECK(entry != UnseededNumberDictionary::kNotFound);
Handle<Object> result =
UnseededNumberDictionary::DeleteProperty(cache, entry);
USE(result);
DCHECK(result->IsTrue(isolate));
auto new_cache = UnseededNumberDictionary::Shrink(cache, entry);
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
DCHECK_NE(UnseededNumberDictionary::kNotFound, entry);
cache = UnseededNumberDictionary::DeleteEntry(cache, entry);
isolate->native_context()->set_slow_template_instantiations_cache(*cache);
}
}
@ -361,7 +357,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
bool is_hidden_prototype,
bool is_prototype) {
Handle<JSFunction> constructor;
int serial_number = Smi::cast(info->serial_number())->value();
int serial_number = Smi::ToInt(info->serial_number());
if (!new_target.is_null()) {
if (IsSimpleInstantiation(isolate, *info, *new_target)) {
constructor = Handle<JSFunction>::cast(new_target);
@ -402,7 +398,7 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
ASSIGN_RETURN_ON_EXCEPTION(isolate, object,
JSObject::New(constructor, new_target), JSObject);
if (is_prototype) JSObject::OptimizeAsPrototype(object, FAST_PROTOTYPE);
if (is_prototype) JSObject::OptimizeAsPrototype(object);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, result,
@ -450,8 +446,8 @@ MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> data,
Handle<Name> name) {
int serial_number = Smi::cast(data->serial_number())->value();
MaybeHandle<Name> maybe_name) {
int serial_number = Smi::ToInt(data->serial_number());
if (serial_number) {
Handle<JSObject> result;
if (ProbeInstantiationsCache(isolate, serial_number,
@ -492,10 +488,7 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
}
}
Handle<JSFunction> function = ApiNatives::CreateApiFunction(
isolate, data, prototype, ApiNatives::JavaScriptObjectType);
if (!name.is_null() && name->IsString()) {
function->shared()->set_name(*name);
}
isolate, data, prototype, ApiNatives::JavaScriptObjectType, maybe_name);
if (serial_number) {
// Cache the function.
CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited,
@ -538,10 +531,10 @@ void AddPropertyToPropertyList(Isolate* isolate, Handle<TemplateInfo> templ,
} // namespace
MaybeHandle<JSFunction> ApiNatives::InstantiateFunction(
Handle<FunctionTemplateInfo> data) {
Handle<FunctionTemplateInfo> data, MaybeHandle<Name> maybe_name) {
Isolate* isolate = data->GetIsolate();
InvokeScope invoke_scope(isolate);
return ::v8::internal::InstantiateFunction(isolate, data);
return ::v8::internal::InstantiateFunction(isolate, data, maybe_name);
}
MaybeHandle<JSObject> ApiNatives::InstantiateObject(
@ -562,7 +555,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
Handle<Map> object_map = isolate->factory()->NewMap(
JS_SPECIAL_API_OBJECT_TYPE,
JSObject::kHeaderSize + data->embedder_field_count() * kPointerSize,
FAST_HOLEY_SMI_ELEMENTS);
HOLEY_SMI_ELEMENTS);
object_map->SetConstructor(*constructor);
object_map->set_is_access_check_needed(true);
@ -575,7 +568,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,
PropertyAttributes attributes) {
PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@ -587,7 +580,7 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
PropertyAttributes attributes) {
auto value = handle(Smi::FromInt(intrinsic), isolate);
auto intrinsic_marker = isolate->factory()->true_value();
PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
PropertyDetails details(kData, attributes, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, intrinsic_marker, details_handle, value};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@ -600,7 +593,7 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
Handle<FunctionTemplateInfo> getter,
Handle<FunctionTemplateInfo> setter,
PropertyAttributes attributes) {
PropertyDetails details(kAccessor, attributes, 0, PropertyCellType::kNoCell);
PropertyDetails details(kAccessor, attributes, PropertyCellType::kNoCell);
auto details_handle = handle(details.AsSmi(), isolate);
Handle<Object> data[] = {name, details_handle, getter, setter};
AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@ -621,12 +614,16 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate,
info->set_property_accessors(*list);
}
Handle<JSFunction> ApiNatives::CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype, ApiInstanceType instance_type) {
Handle<Object> prototype, ApiInstanceType instance_type,
MaybeHandle<Name> maybe_name) {
Handle<SharedFunctionInfo> shared =
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj);
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj,
maybe_name);
// To simplify things, API functions always have a shared name.
DCHECK(shared->has_shared_name());
Handle<JSFunction> result =
isolate->factory()->NewFunctionFromSharedFunctionInfo(
shared, isolate->native_context());
@ -695,7 +692,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
}
Handle<Map> map =
isolate->factory()->NewMap(type, instance_size, FAST_HOLEY_SMI_ELEMENTS);
isolate->factory()->NewMap(type, instance_size, HOLEY_SMI_ELEMENTS);
JSFunction::SetInitialMap(result, map, Handle<JSObject>::cast(prototype));
// Mark as undetectable if needed.


@ -20,7 +20,8 @@ class ApiNatives {
static const int kInitialFunctionCacheSize = 256;
MUST_USE_RESULT static MaybeHandle<JSFunction> InstantiateFunction(
Handle<FunctionTemplateInfo> data);
Handle<FunctionTemplateInfo> data,
MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
MUST_USE_RESULT static MaybeHandle<JSObject> InstantiateObject(
Handle<ObjectTemplateInfo> data,
@ -35,10 +36,10 @@ class ApiNatives {
GlobalProxyType
};
static Handle<JSFunction> CreateApiFunction(Isolate* isolate,
Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype,
ApiInstanceType instance_type);
static Handle<JSFunction> CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype, ApiInstanceType instance_type,
MaybeHandle<Name> maybe_name = MaybeHandle<Name>());
static void AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
Handle<Name> name, Handle<Object> value,

756
deps/v8/src/api.cc vendored

File diff suppressed because it is too large

6
deps/v8/src/api.h vendored

@ -111,8 +111,7 @@ class RegisteredExtension {
V(NativeWeakMap, JSWeakMap) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
V(Promise, JSPromise) \
V(DynamicImportResult, JSPromise)
V(Promise, JSPromise)
class Utils {
public:
@ -186,8 +185,6 @@ class Utils {
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Promise> PromiseToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<DynamicImportResult> PromiseToDynamicImportResult(
v8::internal::Handle<v8::internal::JSPromise> obj);
static inline Local<StackTrace> StackTraceToLocal(
v8::internal::Handle<v8::internal::FixedArray> obj);
static inline Local<StackFrame> StackFrameToLocal(
@ -320,7 +317,6 @@ MAKE_TO_LOCAL(SignatureToLocal, FunctionTemplateInfo, Signature)
MAKE_TO_LOCAL(AccessorSignatureToLocal, FunctionTemplateInfo, AccessorSignature)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(PromiseToLocal, JSObject, Promise)
MAKE_TO_LOCAL(PromiseToDynamicImportResult, JSPromise, DynamicImportResult)
MAKE_TO_LOCAL(StackTraceToLocal, FixedArray, StackTrace)
MAKE_TO_LOCAL(StackFrameToLocal, StackFrameInfo, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)


@ -50,9 +50,7 @@ class Arguments BASE_EMBEDDED {
return Handle<S>(reinterpret_cast<S**>(value));
}
int smi_at(int index) {
return Smi::cast((*this)[index])->value();
}
int smi_at(int index) { return Smi::ToInt((*this)[index]); }
double number_at(int index) {
return (*this)[index]->Number();


@ -280,7 +280,7 @@ void RelocInfo::Visit(Heap* heap) {
Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
rm_ = no_reg;
imm32_ = immediate;
value_.immediate = immediate;
rmode_ = rmode;
}
@ -288,14 +288,14 @@ Operand Operand::Zero() { return Operand(static_cast<int32_t>(0)); }
Operand::Operand(const ExternalReference& f) {
rm_ = no_reg;
imm32_ = reinterpret_cast<int32_t>(f.address());
value_.immediate = reinterpret_cast<int32_t>(f.address());
rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}
Operand::Operand(Smi* value) {
rm_ = no_reg;
imm32_ = reinterpret_cast<intptr_t>(value);
value_.immediate = reinterpret_cast<intptr_t>(value);
rmode_ = RelocInfo::NONE32;
}
@ -400,11 +400,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
bool Assembler::is_constant_pool_load(Address pc) {
if (CpuFeatures::IsSupported(ARMv7)) {
return !Assembler::IsMovW(Memory::int32_at(pc));
} else {
return !Assembler::IsMovImmed(Memory::int32_at(pc));
}
return IsLdrPcImmediateOffset(Memory::int32_at(pc));
}

File diff suppressed because it is too large


@ -45,6 +45,8 @@
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
#include "src/double.h"
#include "src/float.h"
namespace v8 {
namespace internal {
@ -501,7 +503,7 @@ class Operand BASE_EMBEDDED {
RelocInfo::Mode rmode = RelocInfo::NONE32));
INLINE(static Operand Zero());
INLINE(explicit Operand(const ExternalReference& f));
explicit Operand(Handle<Object> handle);
explicit Operand(Handle<HeapObject> handle);
INLINE(explicit Operand(Smi* value));
// rm
@ -524,18 +526,29 @@ class Operand BASE_EMBEDDED {
// rm <shift_op> rs
explicit Operand(Register rm, ShiftOp shift_op, Register rs);
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
// Return true if this is a register operand.
INLINE(bool is_reg() const) {
bool IsRegister() const {
return rm_.is_valid() &&
rs_.is(no_reg) &&
shift_op_ == LSL &&
shift_imm_ == 0;
}
// Return true if this is a register operand shifted with an immediate.
bool IsImmediateShiftedRegister() const {
return rm_.is_valid() && !rs_.is_valid();
}
// Return true if this is a register operand shifted with a register.
bool IsRegisterShiftedRegister() const {
return rm_.is_valid() && rs_.is_valid();
}
// Return the number of actual instructions required to implement the given
// instruction for this particular operand. This can be a single instruction,
// if no load into the ip register is necessary, or anything between 2 and 4
// instructions when we need to load from the constant pool (depending upon
// if no load into a scratch register is necessary, or anything between 2 and
// 4 instructions when we need to load from the constant pool (depending upon
// whether the constant pool entry is in the small or extended section). If
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
@ -543,24 +556,46 @@ class Operand BASE_EMBEDDED {
//
// The value returned is only valid as long as no entries are added to the
// constant pool between this call and the actual instruction being emitted.
int instructions_required(const Assembler* assembler, Instr instr = 0) const;
bool must_output_reloc_info(const Assembler* assembler) const;
int InstructionsRequired(const Assembler* assembler, Instr instr = 0) const;
bool MustOutputRelocInfo(const Assembler* assembler) const;
inline int32_t immediate() const {
DCHECK(!rm_.is_valid());
return imm32_;
DCHECK(IsImmediate());
DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
bool IsImmediate() const {
return !rm_.is_valid();
}
HeapObjectRequest heap_object_request() const {
DCHECK(IsHeapObjectRequest());
return value_.heap_object_request;
}
bool IsHeapObjectRequest() const {
DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
DCHECK_IMPLIES(is_heap_object_request_,
rmode_ == RelocInfo::EMBEDDED_OBJECT ||
rmode_ == RelocInfo::CODE_TARGET);
return is_heap_object_request_;
}
Register rm() const { return rm_; }
Register rs() const { return rs_; }
ShiftOp shift_op() const { return shift_op_; }
private:
Register rm_;
Register rs_;
ShiftOp shift_op_;
int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
int32_t imm32_; // valid if rm_ == no_reg
int shift_imm_; // valid if rm_ != no_reg && rs_ == no_reg
union Value {
Value() {}
HeapObjectRequest heap_object_request; // if is_heap_object_request_
int32_t immediate; // otherwise
} value_; // valid if rm_ == no_reg
bool is_heap_object_request_ = false;
RelocInfo::Mode rmode_;
friend class Assembler;
@ -573,8 +608,9 @@ class MemOperand BASE_EMBEDDED {
// [rn +/- offset] Offset/NegOffset
// [rn +/- offset]! PreIndex/NegPreIndex
// [rn], +/- offset PostIndex/NegPostIndex
// offset is any signed 32-bit value; offset is first loaded to register ip if
// it does not fit the addressing mode (12-bit unsigned and sign bit)
// offset is any signed 32-bit value; offset is first loaded to a scratch
// register if it does not fit the addressing mode (12-bit unsigned and sign
// bit)
explicit MemOperand(Register rn, int32_t offset = 0, AddrMode am = Offset);
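The addressing-mode limit mentioned in the comment is concrete: an immediate offset is encoded as a 12-bit magnitude with the sign carried by the add/subtract bit. A minimal sketch of that check (FitsLoadStoreOffset is a name invented here, not a V8 helper):

#include <cstdint>

// True if |offset| fits the 12-bit-unsigned-plus-sign-bit encoding;
// otherwise the assembler must materialize it in a scratch register.
bool FitsLoadStoreOffset(int32_t offset) {
  uint32_t magnitude = offset >= 0 ? static_cast<uint32_t>(offset)
                                   : 0u - static_cast<uint32_t>(offset);
  return magnitude <= 0xfff;
}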
// [rn +/- rm] Offset/NegOffset
@ -703,7 +739,7 @@ class Assembler : public AssemblerBase {
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
void GetCode(CodeDesc* desc);
void GetCode(Isolate* isolate, CodeDesc* desc);
// Label operations & relative jumps (PPUM Appendix D)
//
@ -789,6 +825,8 @@ class Assembler : public AssemblerBase {
static constexpr int kDebugBreakSlotLength =
kDebugBreakSlotInstructions * kInstrSize;
RegList* GetScratchRegisterList() { return &scratch_register_list_; }
// ---------------------------------------------------------------------------
// Code generation
@ -1131,10 +1169,10 @@ class Assembler : public AssemblerBase {
SwVfpRegister last,
Condition cond = al);
void vmov(const SwVfpRegister dst, float imm);
void vmov(const SwVfpRegister dst, Float32 imm);
void vmov(const DwVfpRegister dst,
double imm,
const Register scratch = no_reg);
Double imm,
const Register extra_scratch = no_reg);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond = al);
@ -1491,25 +1529,41 @@ class Assembler : public AssemblerBase {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
// Class for blocking sharing of code targets in constant pool.
class BlockCodeTargetSharingScope {
public:
explicit BlockCodeTargetSharingScope(Assembler* assem) : assem_(nullptr) {
Open(assem);
}
// This constructor does not initialize the scope. The user needs to
// explicitly call Open() before using it.
BlockCodeTargetSharingScope() : assem_(nullptr) {}
~BlockCodeTargetSharingScope() {
Close();
}
void Open(Assembler* assem) {
DCHECK_NULL(assem_);
DCHECK_NOT_NULL(assem);
assem_ = assem;
assem_->StartBlockCodeTargetSharing();
}
private:
void Close() {
if (assem_ != nullptr) {
assem_->EndBlockCodeTargetSharing();
}
}
Assembler* assem_;
DISALLOW_COPY_AND_ASSIGN(BlockCodeTargetSharingScope);
};
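A hypothetical call site for this scope, showing why the two-phase Open() exists (the names below are illustrative, not from the tree):

void EmitWithoutSharing(Assembler* assm, bool block_sharing) {
  BlockCodeTargetSharingScope scope;    // inert until opened
  if (block_sharing) scope.Open(assm);  // blocks until scope destruction
  // Code targets emitted here get fresh constant pool entries instead
  // of sharing an existing entry.
}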
// Debugging
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
void SetRecordedAstId(TypeFeedbackId ast_id) {
DCHECK(recorded_ast_id_.IsNone());
recorded_ast_id_ = ast_id;
}
TypeFeedbackId RecordedAstId() {
DCHECK(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@ -1636,11 +1690,6 @@ class Assembler : public AssemblerBase {
}
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
TypeFeedbackId recorded_ast_id_;
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos
@ -1649,8 +1698,22 @@ class Assembler : public AssemblerBase {
// Patch branch instruction at pos to branch to given branch target pos
void target_at_put(int pos, int target_pos);
// Prevent sharing of code target constant pool entries until
// EndBlockCodeTargetSharing is called. Calls to this function can be nested
// but must be followed by an equal number of calls to
// EndBlockCodeTargetSharing.
void StartBlockCodeTargetSharing() {
++code_target_sharing_blocked_nesting_;
}
// Resume sharing of constant pool code target entries. Needs to be called
// as many times as StartBlockCodeTargetSharing to have an effect.
void EndBlockCodeTargetSharing() {
--code_target_sharing_blocked_nesting_;
}
// Prevent constant pool emission until EndBlockConstPool is called.
// Call to this function can be nested but must be followed by an equal
// Calls to this function can be nested but must be followed by an equal
// number of calls to EndBlockConstPool.
void StartBlockConstPool() {
if (const_pool_blocked_nesting_++ == 0) {
@ -1660,7 +1723,7 @@ class Assembler : public AssemblerBase {
}
}
// Resume constant pool emission. Need to be called as many time as
// Resume constant pool emission. Needs to be called as many times as
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
if (--const_pool_blocked_nesting_ == 0) {
@ -1726,6 +1789,12 @@ class Assembler : public AssemblerBase {
std::vector<ConstantPoolEntry> pending_32_bit_constants_;
std::vector<ConstantPoolEntry> pending_64_bit_constants_;
// Map of address of handle to index in pending_32_bit_constants_.
std::map<Address, int> handle_to_index_map_;
// Scratch registers available for use by the Assembler.
RegList scratch_register_list_;
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
@ -1749,6 +1818,11 @@ class Assembler : public AssemblerBase {
static constexpr int kCheckPoolIntervalInst = 32;
static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
// Sharing of code target entries may be blocked in some code sequences.
int code_target_sharing_blocked_nesting_;
bool IsCodeTargetSharingAllowed() const {
return code_target_sharing_blocked_nesting_ == 0;
}
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
@ -1766,16 +1840,21 @@ class Assembler : public AssemblerBase {
void GrowBuffer();
// 32-bit immediate values
void move_32_bit_immediate(Register rd,
const Operand& x,
Condition cond = al);
void Move32BitImmediate(Register rd, const Operand& x, Condition cond = al);
// Instruction generation
void addrmod1(Instr instr, Register rn, Register rd, const Operand& x);
void addrmod2(Instr instr, Register rd, const MemOperand& x);
void addrmod3(Instr instr, Register rd, const MemOperand& x);
void addrmod4(Instr instr, Register rn, RegList rl);
void addrmod5(Instr instr, CRegister crd, const MemOperand& x);
void AddrMode1(Instr instr, Register rd, Register rn, const Operand& x);
// Attempt to encode operand |x| for instruction |instr| and return true on
// success. The result will be encoded in |instr| directly. This method may
// change the opcode if deemed beneficial, for instance, MOV may be turned
// into MVN, ADD into SUB, AND into BIC, ...etc. The only reason this method
// may fail is that the operand is an immediate that cannot be encoded.
bool AddrMode1TryEncodeOperand(Instr* instr, const Operand& x);
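The immediates this method can encode are ARM "Operand2" immediates: an 8-bit value rotated right by an even amount. A self-contained sketch of that test (the function name is invented for illustration):

#include <cstdint>

bool FitsOperand2Immediate(uint32_t imm) {
  for (int rot = 0; rot < 32; rot += 2) {
    // Rotating left by |rot| undoes a rotate-right of the same amount.
    uint32_t imm8 = rot == 0 ? imm : (imm << rot) | (imm >> (32 - rot));
    if (imm8 <= 0xff) return true;  // encodable as imm8 ROR rot
  }
  return false;
}

If this fails for every rotation, AddrMode1TryEncodeOperand reports failure and the caller falls back to a scratch-register load.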
void AddrMode2(Instr instr, Register rd, const MemOperand& x);
void AddrMode3(Instr instr, Register rd, const MemOperand& x);
void AddrMode4(Instr instr, Register rn, RegList rl);
void AddrMode5(Instr instr, CRegister crd, const MemOperand& x);
// Labels
void print(Label* L);
@ -1784,15 +1863,28 @@ class Assembler : public AssemblerBase {
// Record reloc info for current pc_
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
ConstantPoolEntry::Access ConstantPoolAddEntry(int position,
RelocInfo::Mode rmode,
intptr_t value);
ConstantPoolEntry::Access ConstantPoolAddEntry(int position, double value);
void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value);
void ConstantPoolAddEntry(int position, Double value);
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
friend class BlockCodeTargetSharingScope;
friend class EnsureSpace;
// The following functions help with avoiding allocations of embedded heap
// objects during the code assembly phase. {RequestHeapObject} records the
// need for a future heap number allocation or code stub generation. After
// code assembly, {AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request). That is, for each request, it will patch the
// dummy heap object handle that we emitted during code assembly with the
// actual heap object handle.
void RequestHeapObject(HeapObjectRequest request);
void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
std::forward_list<HeapObjectRequest> heap_object_requests_;
};
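A sketch of the installation pass described in the comment above, assuming HeapObjectRequest exposes its kind, payload, and pc offset as in this header; PatchEmbeddedHandleAt stands in for the architecture-specific target patching and is not a real V8 function:

void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  for (auto& request : heap_object_requests_) {
    if (request.kind() == HeapObjectRequest::kHeapNumber) {
      // Allocate the deferred HeapNumber now that an isolate is available.
      Handle<HeapObject> object =
          isolate->factory()->NewHeapNumber(request.heap_number());
      PatchEmbeddedHandleAt(buffer_ + request.offset(), object);
    }
    // kCodeStub requests would be resolved analogously via the stub's
    // GetCode().
  }
}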
constexpr int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
@ -1811,6 +1903,29 @@ class PatchingAssembler : public Assembler {
void FlushICache(Isolate* isolate);
};
// This scope utility allows scratch registers to be managed safely. The
// Assembler's GetScratchRegisterList() is used as a pool of scratch
// registers. These registers can be allocated on demand, and will be returned
// at the end of the scope.
//
// When the scope ends, the Assembler's list will be restored to its original
// state, even if the list is modified by some other means. Note that this scope
// can be nested but the destructors need to run in the opposite order as the
// constructors. We do not have assertions for this.
class UseScratchRegisterScope {
public:
explicit UseScratchRegisterScope(Assembler* assembler);
~UseScratchRegisterScope();
// Take a register from the list and return it.
Register Acquire();
private:
// Currently available scratch registers.
RegList* available_;
// Available scratch registers at the start of this scope.
RegList old_available_;
};
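Since RegList is a bitmask of register codes, the whole scope fits in a few lines; a sketch under that assumption, close to what the comment requires:

UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : available_(assembler->GetScratchRegisterList()),
      old_available_(*available_) {}

UseScratchRegisterScope::~UseScratchRegisterScope() {
  *available_ = old_available_;  // restore even if the list was changed
}

Register UseScratchRegisterScope::Acquire() {
  DCHECK_NOT_NULL(available_);
  DCHECK_NE(*available_, 0);
  int code = base::bits::CountTrailingZeros32(*available_);  // lowest bit
  *available_ &= ~(1u << code);  // remove it from the pool for this scope
  return Register::from_code(code);
}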
} // namespace internal
} // namespace v8

View File

@ -12,6 +12,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/double.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
@ -51,29 +52,6 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register rhs);
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
DCHECK(param_count == 0 ||
r0.is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
for (int i = 0; i < param_count; ++i) {
__ push(descriptor.GetRegisterParameter(i));
}
__ CallExternalReference(miss, param_count);
}
__ Ret();
}
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
@ -671,7 +649,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
const int fp_argument_count = 0;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
__ PrepareCallCFunction(argument_count, fp_argument_count);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
@ -710,7 +688,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
__ PrepareCallCFunction(0, 2);
__ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(isolate()), 0, 2);
@ -731,7 +709,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ mov(exponent, scratch);
}
__ vmov(double_scratch, double_base); // Back up base.
__ vmov(double_result, 1.0, scratch2);
__ vmov(double_result, Double(1.0), scratch2);
// Get absolute value of exponent.
__ cmp(scratch, Operand::Zero());
@ -746,7 +724,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(exponent, Operand::Zero());
__ b(ge, &done);
__ vmov(double_scratch, 1.0, scratch);
__ vmov(double_scratch, Double(1.0), scratch);
__ vdiv(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
@ -761,7 +739,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(lr);
{
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(0, 2, scratch);
__ PrepareCallCFunction(0, 2);
__ MovToFloatParameters(double_base, double_exponent);
__ CallCFunction(ExternalReference::power_double_double_function(isolate()),
0, 2);
@ -781,12 +759,9 @@ bool CEntryStub::NeedsImmovableCode() {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@ -847,7 +822,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
if (frame_alignment > kPointerSize) {
Label alignment_as_expected;
DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
__ tst(sp, Operand(frame_alignment_mask));
__ b(eq, &alignment_as_expected);
// Don't use Check here, as it will call Runtime_Abort re-entering here.
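The tst above is the standard power-of-two alignment check; in C++ terms (IsFrameAligned is an illustrative name):

#include <cstdint>

// With a power-of-two alignment, the low bits must all be zero, so a
// single AND with (alignment - 1) replaces a modulo.
bool IsFrameAligned(uintptr_t sp, uintptr_t frame_alignment) {
  return (sp & (frame_alignment - 1)) == 0;
}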
@ -911,7 +886,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label okay;
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
IsolateAddressId::kPendingExceptionAddress, isolate());
__ mov(r3, Operand(pending_exception_address));
__ ldr(r3, MemOperand(r3));
__ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
@ -940,15 +915,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&exception_returned);
ExternalReference pending_handler_context_address(
Isolate::kPendingHandlerContextAddress, isolate());
IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
Isolate::kPendingHandlerCodeAddress, isolate());
IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
Isolate::kPendingHandlerOffsetAddress, isolate());
IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
Isolate::kPendingHandlerFPAddress, isolate());
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
Isolate::kPendingHandlerSPAddress, isolate());
IsolateAddressId::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set r0 to
// contain the current pending exception, don't clobber it.
@ -956,7 +931,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
isolate());
{
FrameScope scope(masm, StackFrame::MANUAL);
__ PrepareCallCFunction(3, 0, r0);
__ PrepareCallCFunction(3, 0);
__ mov(r0, Operand(0));
__ mov(r1, Operand(0));
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
@ -1006,7 +981,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Save callee-saved vfp registers.
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
// Set up the reserved register for 0.0.
__ vmov(kDoubleRegZero, 0.0);
__ vmov(kDoubleRegZero, Double(0.0));
// Get address of argv, see stm above.
// r0: code entry
@ -1028,31 +1003,38 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
StackFrame::Type marker = type();
__ mov(r7, Operand(StackFrame::TypeToMarker(marker)));
__ mov(r6, Operand(StackFrame::TypeToMarker(marker)));
__ mov(r5,
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ mov(r5, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
__ ldr(r5, MemOperand(r5));
__ mov(ip, Operand(-1)); // Push a bad frame pointer to fail if it is used.
__ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() |
ip.bit());
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
// Push a bad frame pointer to fail if it is used.
__ mov(scratch, Operand(-1));
__ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | scratch.bit());
}
Register scratch = r6;
// Set up frame pointer for the frame to be pushed.
__ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
// If this is the outermost JS call, set js_entry_sp value.
Label non_outermost_js;
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
__ ldr(r6, MemOperand(r5));
__ cmp(r6, Operand::Zero());
__ ldr(scratch, MemOperand(r5));
__ cmp(scratch, Operand::Zero());
__ b(ne, &non_outermost_js);
__ str(fp, MemOperand(r5));
__ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ mov(scratch, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ bind(&non_outermost_js);
__ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ mov(scratch, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
__ push(ip);
__ push(scratch);
// Jump to a faked try block that does the invoke, with a faked catch
// block that sets the pending exception.
@ -1069,10 +1051,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushStackHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
__ mov(scratch,
Operand(ExternalReference(IsolateAddressId::kPendingExceptionAddress,
isolate())));
}
__ str(r0, MemOperand(ip));
__ str(r0, MemOperand(scratch));
__ LoadRoot(r0, Heap::kExceptionRootIndex);
__ b(&exit);
@ -1098,16 +1081,16 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
if (type() == StackFrame::ENTRY_CONSTRUCT) {
ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
isolate());
__ mov(ip, Operand(construct_entry));
__ mov(scratch, Operand(construct_entry));
} else {
ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
__ mov(ip, Operand(entry));
__ mov(scratch, Operand(entry));
}
__ ldr(ip, MemOperand(ip)); // deref address
__ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
__ ldr(scratch, MemOperand(scratch)); // deref address
__ add(scratch, scratch, Operand(Code::kHeaderSize - kHeapObjectTag));
// Branch and link to JSEntryTrampoline.
__ Call(ip);
__ Call(scratch);
// Unlink this frame from the handler chain.
__ PopStackHandler();
@ -1125,9 +1108,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Restore the top frame descriptors from the stack.
__ pop(r3);
__ mov(ip,
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ str(r3, MemOperand(ip));
__ mov(scratch, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
__ str(r3, MemOperand(scratch));
// Reset the stack to the callee saved registers.
__ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
@ -1228,8 +1211,8 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// write-barrier is needed.
__ bind(&megamorphic);
__ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r5, FixedArray::kHeaderSize));
__ LoadRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ str(r4, FieldMemOperand(r5, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function
@ -1321,8 +1304,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ bind(&got_smi_index_);
// Check for index out of range.
__ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
__ cmp(ip, Operand(index_));
__ ldr(result_, FieldMemOperand(object_, String::kLengthOffset));
__ cmp(result_, Operand(index_));
__ b(ls, index_out_of_range_);
__ SmiUntag(index_);
@ -1487,37 +1470,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : left
// -- r0 : right
// -- lr : return address
// -----------------------------------
// Load r2 with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
__ Move(r2, isolate()->factory()->undefined_value());
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
__ tst(r2, Operand(kSmiTagMask));
__ Assert(ne, kExpectedAllocationSite);
__ push(r2);
__ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
__ cmp(r2, ip);
__ pop(r2);
__ Assert(eq, kExpectedAllocationSite);
}
// Tail call into the stub that handles binary operations with allocation
// sites.
BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
DCHECK_EQ(CompareICState::BOOLEAN, state());
Label miss;
@ -1852,22 +1804,22 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
void CompareICStub::GenerateMiss(MacroAssembler* masm) {
Register scratch = r2;
{
// Call the runtime system in a fresh internal frame.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
__ Push(lr, r1, r0);
__ mov(ip, Operand(Smi::FromInt(op())));
__ push(ip);
__ mov(scratch, Operand(Smi::FromInt(op())));
__ push(scratch);
__ CallRuntime(Runtime::kCompareIC_Miss);
// Compute the entry point of the rewritten stub.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
__ add(scratch, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
__ pop(lr);
__ Pop(r1, r0);
}
__ Jump(r2);
__ Jump(scratch);
}
@ -1949,7 +1901,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
// Restore the properties.
__ ldr(properties,
FieldMemOperand(receiver, JSObject::kPropertiesOffset));
FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
}
const int spill_mask =
@ -1957,7 +1909,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
r2.bit() | r1.bit() | r0.bit());
__ stm(db_w, sp, spill_mask);
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
__ mov(r1, Operand(Handle<Name>(name)));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@ -2148,7 +2100,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
__ PrepareCallCFunction(argument_count);
Register address =
r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
DCHECK(!address.is(regs_.object()));
@ -2173,10 +2125,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
#ifndef V8_CONCURRENT_MARKING
Label on_black;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@ -2190,6 +2143,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ bind(&on_black);
#endif
// Get the value from the slot.
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
@ -2238,20 +2192,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
// Fall through when we need to inform the incremental marker.
}
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ ldr(r1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ add(r1, r1, Operand(1));
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (tasm->isolate()->function_entry_hook() != NULL) {
tasm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(tasm);
predictable.ExpectSize(tasm->CallStubSize() + 2 * Assembler::kInstrSize);
tasm->push(lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
tasm->pop(lr);
}
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ mov(r1, Operand(r1, LSL, kPointerSizeLog2));
__ add(sp, sp, r1);
__ Ret();
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@ -2259,8 +2209,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm);
predictable.ExpectSize(masm->CallStubSize(&stub) +
2 * Assembler::kInstrSize);
predictable.ExpectSize(masm->CallStubSize() + 2 * Assembler::kInstrSize);
__ push(lr);
__ CallStub(&stub);
__ pop(lr);
@ -2300,26 +2249,31 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
int frame_alignment = masm->ActivationFrameAlignment();
if (frame_alignment > kPointerSize) {
__ mov(r5, sp);
DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
__ and_(sp, sp, Operand(-frame_alignment));
}
#if V8_HOST_ARCH_ARM
int32_t entry_hook =
reinterpret_cast<int32_t>(isolate()->function_entry_hook());
__ mov(ip, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
// It additionally takes an isolate as a third parameter
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ mov(ip, Operand(ExternalReference(&dispatcher,
ExternalReference::BUILTIN_CALL,
isolate())));
#if V8_HOST_ARCH_ARM
int32_t entry_hook =
reinterpret_cast<int32_t>(isolate()->function_entry_hook());
__ mov(scratch, Operand(entry_hook));
#else
// Under the simulator we need to indirect the entry hook through a
// trampoline function at a known address.
// It additionally takes an isolate as a third parameter
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
__ mov(scratch,
Operand(ExternalReference(
&dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
#endif
__ Call(ip);
__ Call(scratch);
}
// Restore the stack pointer if needed.
if (frame_alignment > kPointerSize) {
@ -2338,8 +2292,8 @@ static void CreateArrayDispatch(MacroAssembler* masm,
T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
int last_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
@ -2362,24 +2316,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// r0 - number of arguments
// r1 - constructor?
// sp[0] - last argument
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// is the low bit set? If so, we are holey and that is good.
__ tst(r3, Operand(1));
__ b(ne, &normal_sequence);
}
// look at the first argument
__ ldr(r5, MemOperand(sp, 0));
__ cmp(r5, Operand::Zero());
__ b(eq, &normal_sequence);
STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(PACKED_ELEMENTS == 2);
STATIC_ASSERT(HOLEY_ELEMENTS == 3);
STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
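These assertions pin down the invariant the dispatch relies on: packed kinds are even, each holey counterpart is the next odd value, so bit 0 distinguishes them and adding one converts packed to holey (the tst/add pair in the DONT_OVERRIDE branch below). In sketch form:

bool IsHoleyKind(int kind) { return (kind & 1) != 0; }  // odd == holey
int ToHoleyKind(int packed_kind) { return packed_kind + 1; }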
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
@ -2389,13 +2331,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ bind(&normal_sequence);
ArraySingleArgumentConstructorStub stub(masm->isolate(),
initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// is the low bit set? If so, we are holey and that is good.
Label normal_sequence;
__ tst(r3, Operand(1));
__ b(ne, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ add(r3, r3, Operand(1));
@ -2410,13 +2351,15 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ ldr(r4, FieldMemOperand(
r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
__ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ str(r4, FieldMemOperand(
r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
int last_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= last_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
__ cmp(r3, Operand(kind));
@ -2434,13 +2377,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
int to_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@ -2454,7 +2397,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@ -2522,7 +2465,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ b(eq, &no_info);
__ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset));
__ ldr(r3, FieldMemOperand(
r2, AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ SmiUntag(r3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask));
@ -2596,21 +2540,21 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
__ cmp(r3, Operand(FAST_ELEMENTS));
__ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &done);
__ cmp(r3, Operand(FAST_HOLEY_ELEMENTS));
__ cmp(r3, Operand(HOLEY_ELEMENTS));
__ Assert(eq,
kInvalidElementsKindForInternalArrayOrInternalPackedArray);
__ bind(&done);
}
Label fast_elements_case;
__ cmp(r3, Operand(FAST_ELEMENTS));
__ cmp(r3, Operand(PACKED_ELEMENTS));
__ b(eq, &fast_elements_case);
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
GenerateCase(masm, HOLEY_ELEMENTS);
__ bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
GenerateCase(masm, PACKED_ELEMENTS);
}
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
@ -2666,7 +2610,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
__ PrepareCallCFunction(1, r0);
__ PrepareCallCFunction(1);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
__ CallCFunction(ExternalReference::log_enter_external_function(isolate),
1);
@ -2682,7 +2626,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
__ PushSafepointRegisters();
__ PrepareCallCFunction(1, r0);
__ PrepareCallCFunction(1);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
__ CallCFunction(ExternalReference::log_leave_external_function(isolate),
1);
@ -2707,8 +2651,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
}
__ sub(r6, r6, Operand(1));
__ str(r6, MemOperand(r9, kLevelOffset));
__ ldr(ip, MemOperand(r9, kLimitOffset));
__ cmp(r5, ip);
__ ldr(r6, MemOperand(r9, kLimitOffset));
__ cmp(r5, r6);
__ b(ne, &delete_allocated_handles);
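Roughly what this exit sequence does to the isolate's handle scope data, written as C++ (a sketch; DeleteExtensions stands in for the slow path taken at delete_allocated_handles below):

void CloseScope(HandleScopeData* data, Object** prev_next,
                Object** prev_limit, Isolate* isolate) {
  data->next = prev_next;
  data->level--;
  if (data->limit != prev_limit) {  // extra handle blocks were allocated
    data->limit = prev_limit;
    DeleteExtensions(isolate);
  }
}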
// Leave the API exit frame.
@ -2727,8 +2671,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
__ mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ ldr(r5, MemOperand(ip));
__ mov(r6, Operand(ExternalReference::scheduled_exception_address(isolate)));
__ ldr(r5, MemOperand(r6));
__ cmp(r4, r5);
__ b(ne, &promote_scheduled_exception);
@ -2742,7 +2686,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ bind(&delete_allocated_handles);
__ str(r5, MemOperand(r9, kLimitOffset));
__ mov(r4, r0);
__ PrepareCallCFunction(1, r5);
__ PrepareCallCFunction(1);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate)));
__ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
1);
@ -2798,20 +2742,22 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// call data
__ push(call_data);
Register scratch = call_data;
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
Register scratch0 = call_data;
Register scratch1 = r5;
__ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
// return value
__ push(scratch);
__ push(scratch0);
// return value default
__ push(scratch);
__ push(scratch0);
// isolate
__ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
__ push(scratch);
__ mov(scratch1,
Operand(ExternalReference::isolate_address(masm->isolate())));
__ push(scratch1);
// holder
__ push(holder);
// Prepare arguments.
__ mov(scratch, sp);
__ mov(scratch0, sp);
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
@ -2820,18 +2766,19 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
DCHECK(!api_function_address.is(r0) && !scratch0.is(r0));
// r0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ add(r0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
__ str(scratch, MemOperand(r0, 0 * kPointerSize));
__ str(scratch0, MemOperand(r0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
__ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ str(ip, MemOperand(r0, 1 * kPointerSize));
__ add(scratch1, scratch0,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ str(scratch1, MemOperand(r0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
__ mov(ip, Operand(argc()));
__ str(ip, MemOperand(r0, 2 * kPointerSize));
__ mov(scratch0, Operand(argc()));
__ str(scratch0, MemOperand(r0, 2 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());

View File

@ -143,7 +143,8 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ ldr(temp1, MemOperand(src, 4, PostIndex));
__ str(temp1, MemOperand(dest, 4, PostIndex));
} else {
Register temp2 = ip;
UseScratchRegisterScope temps(&masm);
Register temp2 = temps.Acquire();
Label loop;
__ bic(temp2, chars, Operand(0x3), SetCC);
@ -167,7 +168,7 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
@ -219,8 +220,10 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
__ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
__ Ret();
} else {
UseScratchRegisterScope temps(&masm);
Register temp1 = r3;
Register temp2 = ip;
Register temp2 = temps.Acquire();
Register temp3 = lr;
Register temp4 = r4;
Label loop;
@ -256,7 +259,7 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
}
CodeDesc desc;
masm.GetCode(&desc);
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
@ -284,7 +287,7 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
masm.GetCode(&desc);
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);

View File

@ -87,24 +87,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(r0.code(), params);
output_frame->SetRegister(r1.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
#define __ masm()->
// This code tries to be close to ia32 code so that any changes can be
@ -129,9 +111,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// We use a run-time check for VFP32DREGS.
CpuFeatureScope scope(masm(), VFP32DREGS,
CpuFeatureScope::kDontCheckSupported);
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
__ CheckFor32DRegs(scratch);
// Push registers d0-d15, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
@ -148,8 +132,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// handle this a bit differently.
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
__ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ str(fp, MemOperand(ip));
{
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ mov(scratch, Operand(ExternalReference(
IsolateAddressId::kCEntryFPAddress, isolate())));
__ str(fp, MemOperand(scratch));
}
const int kSavedRegistersAreaSize =
(kNumberOfRegisters * kPointerSize) + kDoubleRegsSize + kFloatRegsSize;
@ -167,7 +156,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and fifth argument on stack.
__ PrepareCallCFunction(6, r5);
__ PrepareCallCFunction(6);
__ mov(r0, Operand(0));
Label context_check;
__ ldr(r1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
@ -248,7 +237,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Compute the output frame in the deoptimizer.
__ push(r0); // Preserve deoptimizer object across call.
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1);
__ PrepareCallCFunction(1);
// Call Deoptimizer::ComputeOutputFrames().
{
AllowExternalCallThatCantCauseGC scope(masm());
@ -311,15 +300,18 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Restore the registers from the stack.
__ ldm(ia_w, sp, restored_regs); // all but pc registers.
__ pop(ip); // remove sp
__ pop(ip); // remove lr
__ InitializeRootRegister();
__ pop(ip); // remove pc
__ pop(ip); // get continuation, leave pc on stack
__ pop(lr);
__ Jump(ip);
// Remove sp, lr and pc.
__ Drop(3);
{
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
__ pop(scratch); // get continuation, leave pc on stack
__ pop(lr);
__ Jump(scratch);
}
__ stop("Unreachable.");
}
@ -332,13 +324,15 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
// need two instructions.
STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
UseScratchRegisterScope temps(masm());
Register scratch = temps.Acquire();
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatureScope scope(masm(), ARMv7);
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ movw(ip, i);
__ movw(scratch, i);
__ b(&done);
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
@ -354,14 +348,14 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ mov(ip, Operand(i & 0xff)); // Set the low byte.
__ mov(scratch, Operand(i & 0xff)); // Set the low byte.
__ b(&high_fixes[i >> 8]); // Jump to the secondary table.
DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
}
// Generate the secondary table, to set the high byte.
for (int high = 1; high <= high_fix_max; high++) {
__ bind(&high_fixes[high]);
__ orr(ip, ip, Operand(high << 8));
__ orr(scratch, scratch, Operand(high << 8));
// If this isn't the last entry, emit a branch to the end of the table.
// The last entry can just fall through.
if (high < high_fix_max) __ b(&high_fixes[0]);
@ -371,7 +365,7 @@ void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
// through with no additional branch.
__ bind(&high_fixes[0]);
}
__ push(ip);
__ push(scratch);
}
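Each entry is padded to table_entry_size_ precisely so that ids and table addresses stay interconvertible; a sketch of the lookup the deoptimizer performs (illustrative signature):

Address GetDeoptEntryAddress(Address table_base, int id, int entry_size) {
  return table_base + id * entry_size;  // valid only for fixed-size entries
}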

View File

@ -343,7 +343,6 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
return 5;
}
UNREACHABLE();
return -1;
}
@ -416,8 +415,8 @@ void Decoder::FormatNeonList(int Vd, int type) {
void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"[r%d", Rn);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "[%s",
converter_.NameOfCPURegister(Rn));
if (align != 0) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
":%d", (1 << align) << 6);
@ -427,8 +426,8 @@ void Decoder::FormatNeonMemory(int Rn, int align, int Rm) {
} else if (Rm == 13) {
Print("]!");
} else {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"], r%d", Rm);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "], %s",
converter_.NameOfCPURegister(Rm));
}
}
@ -686,7 +685,8 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return -1;
}
}
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%p", addr);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%p",
static_cast<void*>(addr));
return 1;
}
case 'S':
@ -705,7 +705,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
}
UNREACHABLE();
return -1;
}
@ -1559,6 +1558,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
(instr->VAValue() == 0x0)) {
DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
} else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) {
const char* rt_name = converter_.NameOfCPURegister(instr->RtValue());
if (instr->Bit(23) == 0) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
if ((opc1_opc2 & 0xb) == 0) {
@ -1570,31 +1570,30 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
} else {
int vd = instr->VFPNRegValue(kDoublePrecision);
int rt = instr->RtValue();
if ((opc1_opc2 & 0x8) != 0) {
// NeonS8 / NeonU8
int i = opc1_opc2 & 0x7;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov.8 d%d[%d], r%d", vd, i, rt);
"vmov.8 d%d[%d], %s", vd, i, rt_name);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov.16 d%d[%d], r%d", vd, i, rt);
"vmov.16 d%d[%d], %s", vd, i, rt_name);
} else {
Unknown(instr);
}
}
} else {
int size = 32;
if (instr->Bit(5) != 0)
if (instr->Bit(5) != 0) {
size = 16;
else if (instr->Bit(22) != 0)
} else if (instr->Bit(22) != 0) {
size = 8;
}
int Vd = instr->VFPNRegValue(kSimd128Precision);
int Rt = instr->RtValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vdup.%i q%d, r%d", size, Vd, Rt);
"vdup.%i q%d, %s", size, Vd, rt_name);
}
} else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
@ -1607,19 +1606,20 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
} else {
char sign = instr->Bit(23) != 0 ? 'u' : 's';
int rt = instr->RtValue();
const char* rt_name = converter_.NameOfCPURegister(instr->RtValue());
int vn = instr->VFPNRegValue(kDoublePrecision);
if ((opc1_opc2 & 0x8) != 0) {
// NeonS8 / NeonU8
int i = opc1_opc2 & 0x7;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov.%c8 r%d, d%d[%d]", sign, rt, vn, i);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c8 %s, d%d[%d]",
sign, rt_name, vn, i);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c16 r%d, d%d[%d]",
sign, rt, vn, i);
SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%c16 %s, d%d[%d]",
sign, rt_name, vn, i);
} else {
Unknown(instr);
}
@ -2424,17 +2424,17 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
case 0xA:
case 0xB:
if ((instr->Bits(22, 20) == 5) && (instr->Bits(15, 12) == 0xf)) {
int Rn = instr->Bits(19, 16);
const char* rn_name = converter_.NameOfCPURegister(instr->Bits(19, 16));
int offset = instr->Bits(11, 0);
if (offset == 0) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "pld [r%d]", Rn);
SNPrintF(out_buffer_ + out_buffer_pos_, "pld [%s]", rn_name);
} else if (instr->Bit(23) == 0) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d, #-%d]", Rn, offset);
"pld [%s, #-%d]", rn_name, offset);
} else {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d, #+%d]", Rn, offset);
"pld [%s, #+%d]", rn_name, offset);
}
} else if (instr->SpecialValue() == 0xA && instr->Bits(22, 20) == 7) {
int option = instr->Bits(3, 0);

View File

@ -21,15 +21,6 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}

View File

@ -49,6 +49,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return r5; }
const Register StringCompareDescriptor::LeftRegister() { return r1; }
const Register StringCompareDescriptor::RightRegister() { return r0; }
const Register StringConcatDescriptor::ArgumentsCountRegister() { return r0; }
const Register ApiGetterDescriptor::HolderRegister() { return r0; }
const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
@ -155,6 +157,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments (on the stack, not including receiver)
// r1 : the target to call
// r2 : arguments list (FixedArray)
// r4 : arguments list length (untagged)
Register registers[] = {r1, r0, r2, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
@ -164,6 +176,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments (on the stack, not including receiver)
// r1 : the target to call
// r2 : the object to spread
Register registers[] = {r1, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r1 : the target to call
// r2 : the arguments list
Register registers[] = {r1, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments (on the stack, not including receiver)
// r1 : the target to call
// r3 : the new target
// r2 : arguments list (FixedArray)
// r4 : arguments list length (untagged)
Register registers[] = {r1, r3, r0, r2, r4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
@ -174,6 +214,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments (on the stack, not including receiver)
// r1 : the target to call
// r3 : the new target
// r2 : the object to spread
Register registers[] = {r1, r3, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r1 : the target to call
// r3 : the new target
// r2 : the arguments list
Register registers[] = {r1, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments
@ -378,8 +437,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
r0, // the value to pass to the generator
r1, // the JSGeneratorObject to resume
r2, // the resume mode (tagged)
r3, // SuspendFlags (tagged)
r2 // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -3225,7 +3225,6 @@ void Simulator::DecodeType7(Instruction* instr) {
void Simulator::DecodeTypeVFP(Instruction* instr) {
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
DCHECK(instr->Bits(11, 9) == 0x5);
// Obtain single precision register codes.
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
@ -3749,7 +3748,6 @@ bool get_inv_op_vfp_flag(VFPRoundingMode mode,
(val <= (min_int - 1.0));
default:
UNREACHABLE();
return true;
}
}

View File

@ -16,7 +16,7 @@ namespace internal {
bool CpuFeatures::SupportsCrankshaft() { return true; }
bool CpuFeatures::SupportsWasmSimd128() { return false; }
bool CpuFeatures::SupportsWasmSimd128() { return true; }
void RelocInfo::apply(intptr_t delta) {
// On arm64 only internal references need extra work.
@ -57,6 +57,15 @@ inline int CPURegister::SizeInBytes() const {
return reg_size / 8;
}
inline bool CPURegister::Is8Bits() const {
DCHECK(IsValid());
return reg_size == 8;
}
inline bool CPURegister::Is16Bits() const {
DCHECK(IsValid());
return reg_size == 16;
}
inline bool CPURegister::Is32Bits() const {
DCHECK(IsValid());
@ -69,9 +78,13 @@ inline bool CPURegister::Is64Bits() const {
return reg_size == 64;
}
inline bool CPURegister::Is128Bits() const {
DCHECK(IsValid());
return reg_size == 128;
}
inline bool CPURegister::IsValid() const {
if (IsValidRegister() || IsValidFPRegister()) {
if (IsValidRegister() || IsValidVRegister()) {
DCHECK(!IsNone());
return true;
} else {
@ -87,14 +100,14 @@ inline bool CPURegister::IsValidRegister() const {
((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
}
inline bool CPURegister::IsValidFPRegister() const {
return IsFPRegister() &&
((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
(reg_code < kNumberOfFPRegisters);
inline bool CPURegister::IsValidVRegister() const {
return IsVRegister() &&
((reg_size == kBRegSizeInBits) || (reg_size == kHRegSizeInBits) ||
(reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits) ||
(reg_size == kQRegSizeInBits)) &&
(reg_code < kNumberOfVRegisters);
}
inline bool CPURegister::IsNone() const {
// kNoRegister types should always have size 0 and code 0.
DCHECK((reg_type != kNoRegister) || (reg_code == 0));
@ -120,11 +133,7 @@ inline bool CPURegister::IsRegister() const {
return reg_type == kRegister;
}
inline bool CPURegister::IsFPRegister() const {
return reg_type == kFPRegister;
}
inline bool CPURegister::IsVRegister() const { return reg_type == kVRegister; }
inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
return (reg_size == other.reg_size) && (reg_type == other.reg_type);
@ -200,7 +209,7 @@ inline Register Register::XRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return csp;
} else {
DCHECK(code < kNumberOfRegisters);
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kXRegSizeInBits);
}
}
@ -210,23 +219,40 @@ inline Register Register::WRegFromCode(unsigned code) {
if (code == kSPRegInternalCode) {
return wcsp;
} else {
DCHECK(code < kNumberOfRegisters);
DCHECK_LT(code, static_cast<unsigned>(kNumberOfRegisters));
return Register::Create(code, kWRegSizeInBits);
}
}
inline FPRegister FPRegister::SRegFromCode(unsigned code) {
DCHECK(code < kNumberOfFPRegisters);
return FPRegister::Create(code, kSRegSizeInBits);
inline VRegister VRegister::BRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kBRegSizeInBits);
}
inline FPRegister FPRegister::DRegFromCode(unsigned code) {
DCHECK(code < kNumberOfFPRegisters);
return FPRegister::Create(code, kDRegSizeInBits);
inline VRegister VRegister::HRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kHRegSizeInBits);
}
inline VRegister VRegister::SRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kSRegSizeInBits);
}
inline VRegister VRegister::DRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kDRegSizeInBits);
}
inline VRegister VRegister::QRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kQRegSizeInBits);
}
inline VRegister VRegister::VRegFromCode(unsigned code) {
DCHECK_LT(code, static_cast<unsigned>(kNumberOfVRegisters));
return VRegister::Create(code, kVRegSizeInBits);
}
inline Register CPURegister::W() const {
DCHECK(IsValidRegister());
@ -239,16 +265,34 @@ inline Register CPURegister::X() const {
return Register::XRegFromCode(reg_code);
}
inline FPRegister CPURegister::S() const {
DCHECK(IsValidFPRegister());
return FPRegister::SRegFromCode(reg_code);
inline VRegister CPURegister::V() const {
DCHECK(IsValidVRegister());
return VRegister::VRegFromCode(reg_code);
}
inline VRegister CPURegister::B() const {
DCHECK(IsValidVRegister());
return VRegister::BRegFromCode(reg_code);
}
inline FPRegister CPURegister::D() const {
DCHECK(IsValidFPRegister());
return FPRegister::DRegFromCode(reg_code);
inline VRegister CPURegister::H() const {
DCHECK(IsValidVRegister());
return VRegister::HRegFromCode(reg_code);
}
inline VRegister CPURegister::S() const {
DCHECK(IsValidVRegister());
return VRegister::SRegFromCode(reg_code);
}
inline VRegister CPURegister::D() const {
DCHECK(IsValidVRegister());
return VRegister::DRegFromCode(reg_code);
}
inline VRegister CPURegister::Q() const {
DCHECK(IsValidVRegister());
return VRegister::QRegFromCode(reg_code);
}
@ -310,7 +354,6 @@ Immediate::Immediate(T t, RelocInfo::Mode rmode)
STATIC_ASSERT(ImmediateInitializer<T>::kIsIntType);
}
// Operand.
template<typename T>
Operand::Operand(Handle<T> value) : immediate_(value), reg_(NoReg) {}
@ -325,7 +368,6 @@ Operand::Operand(T t, RelocInfo::Mode rmode)
: immediate_(t, rmode),
reg_(NoReg) {}
Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
: immediate_(0),
reg_(reg),
@ -352,9 +394,21 @@ Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}
bool Operand::IsHeapObjectRequest() const {
DCHECK_IMPLIES(heap_object_request_.has_value(), reg_.Is(NoReg));
DCHECK_IMPLIES(heap_object_request_.has_value(),
immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT ||
immediate_.rmode() == RelocInfo::CODE_TARGET);
return heap_object_request_.has_value();
}
HeapObjectRequest Operand::heap_object_request() const {
DCHECK(IsHeapObjectRequest());
return *heap_object_request_;
}
bool Operand::IsImmediate() const {
return reg_.Is(NoReg);
return reg_.Is(NoReg) && !IsHeapObjectRequest();
}
@ -383,6 +437,13 @@ Operand Operand::ToExtendedRegister() const {
return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
Immediate Operand::immediate_for_heap_object_request() const {
DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT) ||
(heap_object_request().kind() == HeapObjectRequest::kCodeStub &&
immediate_.rmode() == RelocInfo::CODE_TARGET));
return immediate_;
}
Immediate Operand::immediate() const {
DCHECK(IsImmediate());
@ -491,7 +552,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
regoffset_ = NoReg;
} else if (offset.IsShiftedRegister()) {
DCHECK(addrmode == Offset);
DCHECK((addrmode == Offset) || (addrmode == PostIndex));
regoffset_ = offset.reg();
shift_ = offset.shift();
@ -877,21 +938,20 @@ LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x : LDR_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDR_d : LDR_s;
}
}
LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
DCHECK(AreSameSizeAndType(rt, rt2));
USE(rt2);
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDP_x : LDP_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? LDP_d : LDP_s;
DCHECK(rt.IsVRegister());
switch (rt.SizeInBits()) {
case kBRegSizeInBits:
return LDR_b;
case kHRegSizeInBits:
return LDR_h;
case kSRegSizeInBits:
return LDR_s;
case kDRegSizeInBits:
return LDR_d;
default:
DCHECK(rt.IsQ());
return LDR_q;
}
}
}
@ -901,11 +961,29 @@ LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? STR_x : STR_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? STR_d : STR_s;
DCHECK(rt.IsVRegister());
switch (rt.SizeInBits()) {
case kBRegSizeInBits:
return STR_b;
case kHRegSizeInBits:
return STR_h;
case kSRegSizeInBits:
return STR_s;
case kDRegSizeInBits:
return STR_d;
default:
DCHECK(rt.IsQ());
return STR_q;
}
}
}
LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
DCHECK_EQ(STP_w | LoadStorePairLBit, LDP_w);
return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
LoadStorePairLBit);
}
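// Illustration (a sketch, not part of the original diff): the DCHECK above
// encodes the fact that each load-pair opcode is the matching store-pair
// opcode with the L bit set, e.g. for a 64-bit pair:
//
//   LoadStorePairOp store = StorePairOpFor(x0, x1);  // STP_x
//   LoadStorePairOp load = static_cast<LoadStorePairOp>(
//       store | LoadStorePairLBit);                  // LDP_x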
LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
const CPURegister& rt2) {
@ -914,8 +992,16 @@ LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
if (rt.IsRegister()) {
return rt.Is64Bits() ? STP_x : STP_w;
} else {
DCHECK(rt.IsFPRegister());
return rt.Is64Bits() ? STP_d : STP_s;
DCHECK(rt.IsVRegister());
switch (rt.SizeInBits()) {
case kSRegSizeInBits:
return STP_s;
case kDRegSizeInBits:
return STP_d;
default:
DCHECK(rt.IsQ());
return STP_q;
}
}
}
@ -924,7 +1010,7 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
if (rt.IsRegister()) {
return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
} else {
DCHECK(rt.IsFPRegister());
DCHECK(rt.IsVRegister());
return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
}
}
@ -945,7 +1031,6 @@ Instr Assembler::Flags(FlagsUpdate S) {
return 0 << FlagsUpdate_offset;
}
UNREACHABLE();
return 0;
}
@ -1108,9 +1193,8 @@ Instr Assembler::ImmLS(int imm9) {
return truncate_to_int9(imm9) << ImmLS_offset;
}
Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
DCHECK(((imm7 >> size) << size) == imm7);
Instr Assembler::ImmLSPair(int imm7, unsigned size) {
DCHECK_EQ((imm7 >> size) << size, imm7);
int scaled_imm7 = imm7 >> size;
DCHECK(is_int7(scaled_imm7));
return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
@ -1152,10 +1236,17 @@ Instr Assembler::ImmBarrierType(int imm2) {
return imm2 << ImmBarrierType_offset;
}
LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
return static_cast<LSDataSize>(op >> SizeLS_offset);
unsigned Assembler::CalcLSDataSize(LoadStoreOp op) {
DCHECK((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
unsigned size = static_cast<Instr>(op >> LSSize_offset);
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
// and "opc" fields.
if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
size = kQRegSizeLog2;
}
}
return size;
}
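// Worked example (a sketch, not part of the original diff; the LDR_q field
// values, size == 0b00 and opc == 0b11, are an assumption based on the ARM
// encoding):
//
//   unsigned size = 0;  // "size" field of LDR_q
//   unsigned opc = 3;   // "opc" field of LDR_q
//   if ((size == 0) && (opc >= 2)) size = kQRegSizeLog2;  // 4: a 16-byte access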
@ -1170,11 +1261,7 @@ Instr Assembler::ShiftMoveWide(int shift) {
return shift << ShiftMoveWide_offset;
}
Instr Assembler::FPType(FPRegister fd) {
return fd.Is64Bits() ? FP64 : FP32;
}
Instr Assembler::FPType(VRegister fd) { return fd.Is64Bits() ? FP64 : FP32; }
Instr Assembler::FPScale(unsigned scale) {
DCHECK(is_uint6(scale));
@ -1205,18 +1292,6 @@ inline void Assembler::CheckBuffer() {
}
}
TypeFeedbackId Assembler::RecordedAstId() {
DCHECK(!recorded_ast_id_.IsNone());
return recorded_ast_id_;
}
void Assembler::ClearRecordedAstId() {
recorded_ast_id_ = TypeFeedbackId::None();
}
} // namespace internal
} // namespace v8

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -38,32 +38,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
isolate()->counters()->code_stubs()->Increment();
CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
int param_count = descriptor.GetRegisterParameterCount();
{
// Call the runtime system in a fresh internal frame.
FrameScope scope(masm, StackFrame::INTERNAL);
DCHECK((param_count == 0) ||
x0.Is(descriptor.GetRegisterParameter(param_count - 1)));
// Push arguments
MacroAssembler::PushPopQueue queue(masm);
for (int i = 0; i < param_count; ++i) {
queue.Queue(descriptor.GetRegisterParameter(i));
}
queue.PushQueued();
__ CallExternalReference(miss, param_count);
}
__ Ret();
}
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label done;
@ -147,8 +121,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// See call site for description.
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Register left,
Register right, Register scratch,
FPRegister double_scratch,
Label* slow, Condition cond) {
VRegister double_scratch, Label* slow,
Condition cond) {
DCHECK(!AreAliased(left, right, scratch));
Label not_identical, return_equal, heap_number;
Register result = x0;
@ -292,12 +266,9 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
// See call site for description.
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register left,
Register right,
FPRegister left_d,
FPRegister right_d,
Label* slow,
static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register left,
Register right, VRegister left_d,
VRegister right_d, Label* slow,
bool strict) {
DCHECK(!AreAliased(left_d, right_d));
DCHECK((left.is(x0) && right.is(x1)) ||
@ -476,8 +447,8 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
// In case 3, we have found out that we were dealing with a number-number
// comparison. The double values of the numbers have been loaded, right into
// rhs_d, left into lhs_d.
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
VRegister rhs_d = d0;
VRegister lhs_d = d1;
EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
__ Bind(&both_loaded_as_doubles);
@ -613,7 +584,7 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
CPURegList saved_regs = kCallerSaved;
CPURegList saved_fp_regs = kCallerSavedFP;
CPURegList saved_fp_regs = kCallerSavedV;
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
@ -686,12 +657,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Register exponent_integer = MathPowIntegerDescriptor::exponent();
DCHECK(exponent_integer.is(x12));
Register saved_lr = x19;
FPRegister result_double = d0;
FPRegister base_double = d0;
FPRegister exponent_double = d1;
FPRegister base_double_copy = d2;
FPRegister scratch1_double = d6;
FPRegister scratch0_double = d7;
VRegister result_double = d0;
VRegister base_double = d0;
VRegister exponent_double = d1;
VRegister base_double_copy = d2;
VRegister scratch1_double = d6;
VRegister scratch0_double = d7;
// A fast-path for integer exponents.
Label exponent_is_smi, exponent_is_integer;
@ -803,14 +774,11 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// CEntryStub.
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
CreateWeakCellStub::GenerateAheadOfTime(isolate);
BinaryOpICStub::GenerateAheadOfTime(isolate);
StoreRegistersStateStub::GenerateAheadOfTime(isolate);
RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@ -1046,15 +1014,15 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Bind(&exception_returned);
ExternalReference pending_handler_context_address(
Isolate::kPendingHandlerContextAddress, isolate());
IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
Isolate::kPendingHandlerCodeAddress, isolate());
IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
Isolate::kPendingHandlerOffsetAddress, isolate());
IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_fp_address(
Isolate::kPendingHandlerFPAddress, isolate());
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
Isolate::kPendingHandlerSPAddress, isolate());
IsolateAddressId::kPendingHandlerSPAddress, isolate());
// Ask the runtime for help to determine the handler. This will set x0 to
// contain the current pending exception, don't clobber it.
@ -1142,7 +1110,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used.
__ Mov(x13, bad_frame_pointer);
__ Mov(x12, StackFrame::TypeToMarker(marker));
__ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
__ Mov(x11, ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate()));
__ Ldr(x10, MemOperand(x11));
__ Push(x13, x12, xzr, x10);
@ -1152,7 +1120,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Push the JS entry frame marker. Also set js_entry_sp if this is the
// outermost JS call.
Label non_outermost_js, done;
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
ExternalReference js_entry_sp(IsolateAddressId::kJSEntrySPAddress, isolate());
__ Mov(x10, ExternalReference(js_entry_sp));
__ Ldr(x11, MemOperand(x10));
__ Cbnz(x11, &non_outermost_js);
@ -1191,8 +1159,8 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// field in the JSEnv and return a failure sentinel. Coming in here the
// fp will be invalid because the PushTryHandler below sets it to 0 to
// signal the existence of the JSEntry frame.
__ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate())));
__ Mov(x10, Operand(ExternalReference(
IsolateAddressId::kPendingExceptionAddress, isolate())));
}
__ Str(code_entry, MemOperand(x10));
__ LoadRoot(x0, Heap::kExceptionRootIndex);
@ -1252,7 +1220,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Restore the top frame descriptors from the stack.
__ Pop(x10);
__ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate()));
__ Mov(x11, ExternalReference(IsolateAddressId::kCEntryFPAddress, isolate()));
__ Str(x10, MemOperand(x11));
// Reset the stack to the callee saved registers.
@ -1582,8 +1550,8 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
Register result = x0;
Register rhs = x0;
Register lhs = x1;
FPRegister rhs_d = d0;
FPRegister lhs_d = d1;
VRegister rhs_d = d0;
VRegister lhs_d = d1;
if (left() == CompareICState::SMI) {
__ JumpIfNotSmi(lhs, &miss);
@ -2009,32 +1977,6 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
}
void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : left
// -- x0 : right
// -- lr : return address
// -----------------------------------
// Load x2 with the allocation site. We stick an undefined dummy value here
// and replace it with the real allocation site later when we instantiate this
// stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
__ LoadObject(x2, handle(isolate()->heap()->undefined_value()));
// Make sure that we actually patched the allocation site.
if (FLAG_debug_code) {
__ AssertNotSmi(x2, kExpectedAllocationSite);
__ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
__ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
kExpectedAllocationSite);
}
// Tail call into the stub that handles binary operations with allocation
// sites.
BinaryOpWithAllocationSiteStub stub(isolate(), state());
__ TailCallStub(&stub);
}
RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
Register address,
Register scratch)
@ -2042,7 +1984,7 @@ RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedFP) {
saved_fp_regs_(kCallerSavedV) {
DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved
@ -2131,10 +2073,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
Label on_black;
Label need_incremental;
Label need_incremental_pop_scratch;
#ifndef V8_CONCURRENT_MARKING
Label on_black;
// If the object is not black we don't have to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
@ -2148,6 +2091,8 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
}
__ Bind(&on_black);
#endif
// Get the value from the slot.
Register val = regs_.scratch0();
__ Ldr(val, MemOperand(regs_.address()));
@ -2225,26 +2170,25 @@ void RecordWriteStub::Generate(MacroAssembler* masm) {
}
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
CEntryStub ces(isolate(), 1, kSaveFPRegs);
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
__ Ldr(x1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Add(x1, x1, 1);
}
masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
__ Drop(x1);
// Return to IC Miss stub, continuation still on stack.
__ Ret();
}
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
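// For concreteness (a sketch, not part of the original diff): assuming
// kInstructionSize is 4 bytes and kCallSizeWithRelocation covers two
// instructions, the hook sequence measured by the DCHECKs below spans
// 2 * 4 + 2 * 4 = 16 bytes.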
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (tasm->isolate()->function_entry_hook() != NULL) {
Assembler::BlockConstPoolScope no_const_pools(tasm);
DontEmitDebugCodeScope no_debug_code(tasm);
Label entry_hook_call_start;
tasm->Bind(&entry_hook_call_start);
tasm->Push(lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
DCHECK(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
kProfileEntryHookCallSize);
tasm->Pop(lr);
}
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
@ -2257,7 +2201,6 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
__ CallStub(&stub);
DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
kProfileEntryHookCallSize);
__ Pop(lr);
}
}
@ -2397,7 +2340,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ PushCPURegList(spill_list);
__ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
__ Mov(x1, Operand(name));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@ -2543,23 +2486,12 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
Register allocation_site = x2;
Register kind = x3;
Label normal_sequence;
if (mode == DONT_OVERRIDE) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4);
STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
// Is the low bit set? If so, the array is holey.
__ Tbnz(kind, 0, &normal_sequence);
}
// Look at the last argument.
// TODO(jbramley): What does a 0 argument represent?
__ Peek(x10, 0);
__ Cbz(x10, &normal_sequence);
STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0);
STATIC_ASSERT(HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(PACKED_ELEMENTS == 2);
STATIC_ASSERT(HOLEY_ELEMENTS == 3);
STATIC_ASSERT(PACKED_DOUBLE_ELEMENTS == 4);
STATIC_ASSERT(HOLEY_DOUBLE_ELEMENTS == 5);
if (mode == DISABLE_ALLOCATION_SITES) {
ElementsKind initial = GetInitialFastElementsKind();
@ -2569,13 +2501,11 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
holey_initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub_holey);
__ Bind(&normal_sequence);
ArraySingleArgumentConstructorStub stub(masm->isolate(),
initial,
DISABLE_ALLOCATION_SITES);
__ TailCallStub(&stub);
} else if (mode == DONT_OVERRIDE) {
// Is the low bit set? If so, the array is holey.
Label normal_sequence;
__ Tbnz(kind, 0, &normal_sequence);
// We are going to create a holey array, but our kind is non-holey.
// Fix kind and retry (only if we have an allocation site in the slot).
__ Orr(kind, kind, 1);
@ -2591,11 +2521,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field; upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ Ldr(x11, FieldMemOperand(allocation_site,
AllocationSite::kTransitionInfoOffset));
__ Ldr(x11,
FieldMemOperand(allocation_site,
AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ Add(x11, x11, Smi::FromInt(kFastElementsKindPackedToHoley));
__ Str(x11, FieldMemOperand(allocation_site,
AllocationSite::kTransitionInfoOffset));
__ Str(x11,
FieldMemOperand(allocation_site,
AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ Bind(&normal_sequence);
int last_index =
@ -2619,13 +2551,13 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
template<class T>
static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
int to_index = GetSequenceIndexFromFastElementsKind(
TERMINAL_FAST_ELEMENTS_KIND);
int to_index =
GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(isolate, kind);
stub.GetCode();
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
if (AllocationSite::ShouldTrack(kind)) {
T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
stub1.GetCode();
}
@ -2639,7 +2571,7 @@ void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
isolate);
ArrayNArgumentsConstructorStub stub(isolate);
stub.GetCode();
ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
ElementsKind kinds[2] = {PACKED_ELEMENTS, HOLEY_ELEMENTS};
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things
InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
@ -2718,9 +2650,9 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Get the elements kind and case on that.
__ JumpIfRoot(allocation_site, Heap::kUndefinedValueRootIndex, &no_info);
__ Ldrsw(kind,
UntagSmiFieldMemOperand(allocation_site,
AllocationSite::kTransitionInfoOffset));
__ Ldrsw(kind, UntagSmiFieldMemOperand(
allocation_site,
AllocationSite::kTransitionInfoOrBoilerplateOffset));
__ And(kind, kind, AllocationSite::ElementsKindBits::kMask);
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
@ -2809,17 +2741,17 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
Label done;
__ Cmp(x3, FAST_ELEMENTS);
__ Ccmp(x3, FAST_HOLEY_ELEMENTS, ZFlag, ne);
__ Cmp(x3, PACKED_ELEMENTS);
__ Ccmp(x3, HOLEY_ELEMENTS, ZFlag, ne);
__ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
}
Label fast_elements_case;
__ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case);
GenerateCase(masm, FAST_HOLEY_ELEMENTS);
__ CompareAndBranch(kind, PACKED_ELEMENTS, eq, &fast_elements_case);
GenerateCase(masm, HOLEY_ELEMENTS);
__ Bind(&fast_elements_case);
GenerateCase(masm, FAST_ELEMENTS);
GenerateCase(masm, PACKED_ELEMENTS);
}
// The number of registers that CallApiFunctionAndReturn will need to save on

File diff suppressed because it is too large.


@ -213,6 +213,11 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
(instr->Bits(27, 24) == 0xC) ||
(instr->Bits(27, 24) == 0xD) );
if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
DecodeNEONLoadStore(instr);
return;
}
if (instr->Bit(24) == 0) {
if (instr->Bit(28) == 0) {
if (instr->Bit(29) == 0) {
@ -226,8 +231,6 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
} else {
V::VisitLoadStoreAcquireRelease(instr);
}
} else {
DecodeAdvSIMDLoadStore(instr);
}
} else {
if ((instr->Bits(31, 30) == 0x3) ||
@ -513,16 +516,14 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
(instr->Bits(27, 24) == 0xF) );
if (instr->Bit(28) == 0) {
DecodeAdvSIMDDataProcessing(instr);
DecodeNEONVectorDataProcessing(instr);
} else {
if (instr->Bit(29) == 1) {
if (instr->Bits(31, 30) == 0x3) {
V::VisitUnallocated(instr);
} else if (instr->Bits(31, 30) == 0x1) {
DecodeNEONScalarDataProcessing(instr);
} else {
if (instr->Bits(31, 30) == 0x3) {
V::VisitUnallocated(instr);
} else if (instr->Bits(31, 30) == 0x1) {
DecodeAdvSIMDDataProcessing(instr);
} else {
if (instr->Bit(29) == 0) {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if ((instr->Bit(23) == 1) ||
@ -629,25 +630,190 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
V::VisitFPDataProcessing3Source(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
}
}
template<typename V>
void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
// TODO(all): Implement Advanced SIMD load/store instruction decode.
template <typename V>
void Decoder<V>::DecodeNEONLoadStore(Instruction* instr) {
DCHECK(instr->Bits(29, 25) == 0x6);
V::VisitUnimplemented(instr);
if (instr->Bit(31) == 0) {
if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
V::VisitUnallocated(instr);
return;
}
if (instr->Bit(23) == 0) {
if (instr->Bits(20, 16) == 0) {
if (instr->Bit(24) == 0) {
V::VisitNEONLoadStoreMultiStruct(instr);
} else {
V::VisitNEONLoadStoreSingleStruct(instr);
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bit(24) == 0) {
V::VisitNEONLoadStoreMultiStructPostIndex(instr);
} else {
V::VisitNEONLoadStoreSingleStructPostIndex(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
template <typename V>
void Decoder<V>::DecodeNEONVectorDataProcessing(Instruction* instr) {
DCHECK(instr->Bits(28, 25) == 0x7);
if (instr->Bit(31) == 0) {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if (instr->Bit(15) == 0) {
if (instr->Bit(10) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(11) == 0) {
V::VisitNEONTable(instr);
} else {
V::VisitNEONPerm(instr);
}
} else {
V::VisitNEONExtract(instr);
}
} else {
if (instr->Bits(23, 22) == 0) {
V::VisitNEONCopy(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bit(10) == 0) {
if (instr->Bit(11) == 0) {
V::VisitNEON3Different(instr);
} else {
if (instr->Bits(18, 17) == 0) {
if (instr->Bit(20) == 0) {
if (instr->Bit(19) == 0) {
V::VisitNEON2RegMisc(instr);
} else {
if (instr->Bits(30, 29) == 0x2) {
V::VisitUnallocated(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
if (instr->Bit(19) == 0) {
V::VisitNEONAcrossLanes(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitNEON3Same(instr);
}
}
} else {
if (instr->Bit(10) == 0) {
V::VisitNEONByIndexedElement(instr);
} else {
if (instr->Bit(23) == 0) {
if (instr->Bits(22, 19) == 0) {
V::VisitNEONModifiedImmediate(instr);
} else {
V::VisitNEONShiftImmediate(instr);
}
} else {
V::VisitUnallocated(instr);
}
}
}
} else {
V::VisitUnallocated(instr);
}
}
template<typename V>
void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
// TODO(all): Implement Advanced SIMD data processing instruction decode.
DCHECK(instr->Bits(27, 25) == 0x7);
V::VisitUnimplemented(instr);
template <typename V>
void Decoder<V>::DecodeNEONScalarDataProcessing(Instruction* instr) {
DCHECK(instr->Bits(28, 25) == 0xF);
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if (instr->Bit(15) == 0) {
if (instr->Bit(10) == 0) {
if (instr->Bit(29) == 0) {
if (instr->Bit(11) == 0) {
V::VisitUnallocated(instr);
} else {
V::VisitUnallocated(instr);
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bits(23, 22) == 0) {
V::VisitNEONScalarCopy(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
} else {
if (instr->Bit(10) == 0) {
if (instr->Bit(11) == 0) {
V::VisitNEONScalar3Diff(instr);
} else {
if (instr->Bits(18, 17) == 0) {
if (instr->Bit(20) == 0) {
if (instr->Bit(19) == 0) {
V::VisitNEONScalar2RegMisc(instr);
} else {
if (instr->Bit(29) == 0) {
V::VisitUnallocated(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
if (instr->Bit(19) == 0) {
V::VisitNEONScalarPairwise(instr);
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitUnallocated(instr);
}
}
} else {
V::VisitNEONScalar3Same(instr);
}
}
} else {
if (instr->Bit(10) == 0) {
V::VisitNEONScalarByIndexedElement(instr);
} else {
if (instr->Bit(23) == 0) {
V::VisitNEONScalarShiftImmediate(instr);
} else {
V::VisitUnallocated(instr);
}
}
}
}


@ -16,50 +16,72 @@ namespace internal {
// List macro containing all visitors needed by the decoder class.
#define VISITOR_LIST(V) \
V(PCRelAddressing) \
V(AddSubImmediate) \
V(LogicalImmediate) \
V(MoveWideImmediate) \
V(Bitfield) \
V(Extract) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister) \
V(CompareBranch) \
V(TestBranch) \
V(ConditionalBranch) \
V(System) \
V(Exception) \
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \
V(LoadStoreAcquireRelease) \
V(LogicalShifted) \
V(AddSubShifted) \
V(AddSubExtended) \
V(AddSubWithCarry) \
V(ConditionalCompareRegister) \
V(ConditionalCompareImmediate) \
V(ConditionalSelect) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPImmediate) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPIntegerConvert) \
V(FPFixedPointConvert) \
V(Unallocated) \
#define VISITOR_LIST(V) \
V(PCRelAddressing) \
V(AddSubImmediate) \
V(LogicalImmediate) \
V(MoveWideImmediate) \
V(Bitfield) \
V(Extract) \
V(UnconditionalBranch) \
V(UnconditionalBranchToRegister) \
V(CompareBranch) \
V(TestBranch) \
V(ConditionalBranch) \
V(System) \
V(Exception) \
V(LoadStorePairPostIndex) \
V(LoadStorePairOffset) \
V(LoadStorePairPreIndex) \
V(LoadLiteral) \
V(LoadStoreUnscaledOffset) \
V(LoadStorePostIndex) \
V(LoadStorePreIndex) \
V(LoadStoreRegisterOffset) \
V(LoadStoreUnsignedOffset) \
V(LoadStoreAcquireRelease) \
V(LogicalShifted) \
V(AddSubShifted) \
V(AddSubExtended) \
V(AddSubWithCarry) \
V(ConditionalCompareRegister) \
V(ConditionalCompareImmediate) \
V(ConditionalSelect) \
V(DataProcessing1Source) \
V(DataProcessing2Source) \
V(DataProcessing3Source) \
V(FPCompare) \
V(FPConditionalCompare) \
V(FPConditionalSelect) \
V(FPImmediate) \
V(FPDataProcessing1Source) \
V(FPDataProcessing2Source) \
V(FPDataProcessing3Source) \
V(FPIntegerConvert) \
V(FPFixedPointConvert) \
V(NEON2RegMisc) \
V(NEON3Different) \
V(NEON3Same) \
V(NEONAcrossLanes) \
V(NEONByIndexedElement) \
V(NEONCopy) \
V(NEONExtract) \
V(NEONLoadStoreMultiStruct) \
V(NEONLoadStoreMultiStructPostIndex) \
V(NEONLoadStoreSingleStruct) \
V(NEONLoadStoreSingleStructPostIndex) \
V(NEONModifiedImmediate) \
V(NEONScalar2RegMisc) \
V(NEONScalar3Diff) \
V(NEONScalar3Same) \
V(NEONScalarByIndexedElement) \
V(NEONScalarCopy) \
V(NEONScalarPairwise) \
V(NEONScalarShiftImmediate) \
V(NEONShiftImmediate) \
V(NEONTable) \
V(NEONPerm) \
V(Unallocated) \
V(Unimplemented)
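// For illustration (not part of the original diff), instantiating the list
// with a declaration macro, as DispatchingDecoderVisitor does below, expands
// every entry into a visit method:
//
//   #define DECLARE(A) void Visit##A(Instruction* instr);
//   VISITOR_LIST(DECLARE)  // VisitPCRelAddressing, ..., VisitNEON3Same, ...
//   #undef DECLARE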
// The Visitor interface. Disassembler and simulator (and other tools)
@ -109,6 +131,8 @@ class DispatchingDecoderVisitor : public DecoderVisitor {
// stored by the decoder.
void RemoveVisitor(DecoderVisitor* visitor);
void VisitNEONShiftImmediate(const Instruction* instr);
#define DECLARE(A) void Visit##A(Instruction* instr);
VISITOR_LIST(DECLARE)
#undef DECLARE
@ -173,12 +197,17 @@ class Decoder : public V {
// Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
// and call the corresponding visitors.
// On entry, instruction bits 29:25 = 0x6.
void DecodeAdvSIMDLoadStore(Instruction* instr);
void DecodeNEONLoadStore(Instruction* instr);
// Decode the Advanced SIMD (NEON) data processing part of the instruction
// tree, and call the corresponding visitors.
// On entry, instruction bits 27:25 = 0x7.
void DecodeAdvSIMDDataProcessing(Instruction* instr);
void DecodeNEONVectorDataProcessing(Instruction* instr);
// Decode the Advanced SIMD (NEON) scalar data processing part of the
// instruction tree, and call the corresponding visitors.
// On entry, instruction bits 28:25 = 0xF.
void DecodeNEONScalarDataProcessing(Instruction* instr);
};


@ -87,26 +87,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
int params = descriptor->GetHandlerParameterCount();
output_frame->SetRegister(x0.code(), params);
output_frame->SetRegister(x1.code(), handler);
}
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
#define __ masm()->
void Deoptimizer::TableEntryGenerator::Generate() {
@ -118,13 +98,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Save all allocatable double registers.
CPURegList saved_double_registers(
CPURegister::kFPRegister, kDRegSizeInBits,
CPURegister::kVRegister, kDRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask());
__ PushCPURegList(saved_double_registers);
// Save all allocatable float registers.
CPURegList saved_float_registers(
CPURegister::kFPRegister, kSRegSizeInBits,
CPURegister::kVRegister, kSRegSizeInBits,
RegisterConfiguration::Crankshaft()->allocatable_float_codes_mask());
__ PushCPURegList(saved_float_registers);
@ -133,7 +113,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
saved_registers.Combine(fp);
__ PushCPURegList(saved_registers);
__ Mov(x3, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ Mov(x3, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
isolate())));
__ Str(fp, MemOperand(x3));
const int kSavedRegistersAreaSize =

File diff suppressed because it is too large.


@ -5,6 +5,7 @@
#ifndef V8_ARM64_DISASM_ARM64_H
#define V8_ARM64_DISASM_ARM64_H
#include "src/arm64/assembler-arm64.h"
#include "src/arm64/decoder-arm64.h"
#include "src/arm64/instructions-arm64.h"
#include "src/globals.h"
@ -29,6 +30,13 @@ class DisassemblingDecoder : public DecoderVisitor {
protected:
virtual void ProcessOutput(Instruction* instr);
// Default output functions. The functions below implement a default way of
// printing elements in the disassembly. A sub-class can override these to
// customize the disassembly output.
// Prints the name of a register.
virtual void AppendRegisterNameToOutput(const CPURegister& reg);
void Format(Instruction* instr, const char* mnemonic, const char* format);
void Substitute(Instruction* instr, const char* string);
int SubstituteField(Instruction* instr, const char* format);


@ -19,15 +19,6 @@ Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}
Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
Register StubFailureTrampolineFrame::context_register() { return cp; }
Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
UNREACHABLE();
return no_reg;
}


@ -21,7 +21,7 @@ bool Instruction::IsLoad() const {
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) != 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
switch (op) {
case LDRB_w:
case LDRH_w:
@ -32,8 +32,12 @@ bool Instruction::IsLoad() const {
case LDRSH_w:
case LDRSH_x:
case LDRSW_x:
case LDR_b:
case LDR_h:
case LDR_s:
case LDR_d: return true;
case LDR_d:
case LDR_q:
return true;
default: return false;
}
}
@ -48,14 +52,18 @@ bool Instruction::IsStore() const {
if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
return Mask(LoadStorePairLBit) == 0;
} else {
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
switch (op) {
case STRB_w:
case STRH_w:
case STR_w:
case STR_x:
case STR_b:
case STR_h:
case STR_s:
case STR_d: return true;
case STR_d:
case STR_q:
return true;
default: return false;
}
}
@ -136,46 +144,50 @@ uint64_t Instruction::ImmLogical() {
}
}
UNREACHABLE();
return 0;
}
float Instruction::ImmFP32() {
// ImmFP: abcdefgh (8 bits)
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
// where B is b ^ 1
uint32_t bits = ImmFP();
uint32_t bit7 = (bits >> 7) & 0x1;
uint32_t bit6 = (bits >> 6) & 0x1;
uint32_t bit5_to_0 = bits & 0x3f;
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
return rawbits_to_float(result);
uint32_t Instruction::ImmNEONabcdefgh() const {
return ImmNEONabc() << 5 | ImmNEONdefgh();
}
float Instruction::ImmFP32() { return Imm8ToFP32(ImmFP()); }
double Instruction::ImmFP64() {
// ImmFP: abcdefgh (8 bits)
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
// where B is b ^ 1
uint32_t bits = ImmFP();
uint64_t bit7 = (bits >> 7) & 0x1;
uint64_t bit6 = (bits >> 6) & 0x1;
uint64_t bit5_to_0 = bits & 0x3f;
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
return rawbits_to_double(result);
double Instruction::ImmFP64() { return Imm8ToFP64(ImmFP()); }
float Instruction::ImmNEONFP32() const { return Imm8ToFP32(ImmNEONabcdefgh()); }
double Instruction::ImmNEONFP64() const {
return Imm8ToFP64(ImmNEONabcdefgh());
}
unsigned CalcLSDataSize(LoadStoreOp op) {
DCHECK_EQ(static_cast<unsigned>(LSSize_offset + LSSize_width),
kInstructionSize * 8);
unsigned size = static_cast<Instr>(op) >> LSSize_offset;
if ((op & LSVector_mask) != 0) {
// Vector register memory operations encode the access size in the "size"
// and "opc" fields.
if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
size = kQRegSizeLog2;
}
}
return size;
}
LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
unsigned CalcLSPairDataSize(LoadStorePairOp op) {
static_assert(kXRegSize == kDRegSize, "X and D registers must be same size.");
static_assert(kWRegSize == kSRegSize, "W and S registers must be same size.");
switch (op) {
case STP_q:
case LDP_q:
return kQRegSizeLog2;
case STP_x:
case LDP_x:
case STP_d:
case LDP_d: return LSDoubleWord;
default: return LSWord;
case LDP_d:
return kXRegSizeLog2;
default:
return kWRegSizeLog2;
}
}
@ -334,7 +346,405 @@ uint64_t InstructionSequence::InlineData() const {
return payload;
}
VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
DCHECK(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
vform == kFormatH || vform == kFormatS || vform == kFormatD);
switch (vform) {
case kFormat8H:
return kFormat8B;
case kFormat4S:
return kFormat4H;
case kFormat2D:
return kFormat2S;
case kFormatH:
return kFormatB;
case kFormatS:
return kFormatH;
case kFormatD:
return kFormatS;
default:
UNREACHABLE();
}
}
VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
vform == kFormatB || vform == kFormatH || vform == kFormatS);
switch (vform) {
case kFormat8B:
return kFormat8H;
case kFormat4H:
return kFormat4S;
case kFormat2S:
return kFormat2D;
case kFormatB:
return kFormatH;
case kFormatH:
return kFormatS;
case kFormatS:
return kFormatD;
default:
UNREACHABLE();
}
}
VectorFormat VectorFormatFillQ(VectorFormat vform) {
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return kFormat16B;
case kFormatH:
case kFormat4H:
case kFormat8H:
return kFormat8H;
case kFormatS:
case kFormat2S:
case kFormat4S:
return kFormat4S;
case kFormatD:
case kFormat1D:
case kFormat2D:
return kFormat2D;
default:
UNREACHABLE();
}
}
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
switch (vform) {
case kFormat4H:
return kFormat8B;
case kFormat8H:
return kFormat16B;
case kFormat2S:
return kFormat4H;
case kFormat4S:
return kFormat8H;
case kFormat1D:
return kFormat2S;
case kFormat2D:
return kFormat4S;
default:
UNREACHABLE();
}
}
VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
DCHECK(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
switch (vform) {
case kFormat8B:
return kFormat16B;
case kFormat4H:
return kFormat8H;
case kFormat2S:
return kFormat4S;
default:
UNREACHABLE();
}
}
VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
DCHECK(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
switch (vform) {
case kFormat16B:
return kFormat8B;
case kFormat8H:
return kFormat4H;
case kFormat4S:
return kFormat2S;
default:
UNREACHABLE();
}
}
VectorFormat ScalarFormatFromLaneSize(int laneSize) {
switch (laneSize) {
case 8:
return kFormatB;
case 16:
return kFormatH;
case 32:
return kFormatS;
case 64:
return kFormatD;
default:
UNREACHABLE();
}
}
VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
}
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
return RegisterSizeInBitsFromFormat(vform) / 8;
}
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormatB:
return kBRegSizeInBits;
case kFormatH:
return kHRegSizeInBits;
case kFormatS:
return kSRegSizeInBits;
case kFormatD:
return kDRegSizeInBits;
case kFormat8B:
case kFormat4H:
case kFormat2S:
case kFormat1D:
return kDRegSizeInBits;
default:
return kQRegSizeInBits;
}
}
unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 8;
case kFormatH:
case kFormat4H:
case kFormat8H:
return 16;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 32;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 64;
default:
UNREACHABLE();
}
}
int LaneSizeInBytesFromFormat(VectorFormat vform) {
return LaneSizeInBitsFromFormat(vform) / 8;
}
int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 0;
case kFormatH:
case kFormat4H:
case kFormat8H:
return 1;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 2;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 3;
default:
UNREACHABLE();
}
}
int LaneCountFromFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormat16B:
return 16;
case kFormat8B:
case kFormat8H:
return 8;
case kFormat4H:
case kFormat4S:
return 4;
case kFormat2S:
case kFormat2D:
return 2;
case kFormat1D:
case kFormatB:
case kFormatH:
case kFormatS:
case kFormatD:
return 1;
default:
UNREACHABLE();
}
}
int MaxLaneCountFromFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormat8B:
case kFormat16B:
return 16;
case kFormatH:
case kFormat4H:
case kFormat8H:
return 8;
case kFormatS:
case kFormat2S:
case kFormat4S:
return 4;
case kFormatD:
case kFormat1D:
case kFormat2D:
return 2;
default:
UNREACHABLE();
}
}
// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
DCHECK_NE(vform, kFormatUndefined);
switch (vform) {
case kFormatB:
case kFormatH:
case kFormatS:
case kFormatD:
return false;
default:
return true;
}
}
int64_t MaxIntFromFormat(VectorFormat vform) {
return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
int64_t MinIntFromFormat(VectorFormat vform) {
return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}
uint64_t MaxUintFromFormat(VectorFormat vform) {
return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr) {
instrbits_ = instr->InstructionBits();
SetFormatMaps(IntegerFormatMap());
}
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format) {
instrbits_ = instr->InstructionBits();
SetFormatMaps(format);
}
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format0,
const NEONFormatMap* format1) {
instrbits_ = instr->InstructionBits();
SetFormatMaps(format0, format1);
}
NEONFormatDecoder::NEONFormatDecoder(const Instruction* instr,
const NEONFormatMap* format0,
const NEONFormatMap* format1,
const NEONFormatMap* format2) {
instrbits_ = instr->InstructionBits();
SetFormatMaps(format0, format1, format2);
}
void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
const NEONFormatMap* format1,
const NEONFormatMap* format2) {
DCHECK_NOT_NULL(format0);
formats_[0] = format0;
formats_[1] = (format1 == NULL) ? formats_[0] : format1;
formats_[2] = (format2 == NULL) ? formats_[1] : format2;
}
void NEONFormatDecoder::SetFormatMap(unsigned index,
const NEONFormatMap* format) {
DCHECK_LT(index, arraysize(formats_));
DCHECK_NOT_NULL(format);
formats_[index] = format;
}
const char* NEONFormatDecoder::SubstitutePlaceholders(const char* string) {
return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
}
const char* NEONFormatDecoder::Substitute(const char* string,
SubstitutionMode mode0,
SubstitutionMode mode1,
SubstitutionMode mode2) {
snprintf(form_buffer_, sizeof(form_buffer_), string, GetSubstitute(0, mode0),
GetSubstitute(1, mode1), GetSubstitute(2, mode2));
return form_buffer_;
}
const char* NEONFormatDecoder::Mnemonic(const char* mnemonic) {
if ((instrbits_ & NEON_Q) != 0) {
snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
return mne_buffer_;
}
return mnemonic;
}
VectorFormat NEONFormatDecoder::GetVectorFormat(int format_index) {
return GetVectorFormat(formats_[format_index]);
}
VectorFormat NEONFormatDecoder::GetVectorFormat(
const NEONFormatMap* format_map) {
static const VectorFormat vform[] = {
kFormatUndefined, kFormat8B, kFormat16B, kFormat4H, kFormat8H,
kFormat2S, kFormat4S, kFormat1D, kFormat2D, kFormatB,
kFormatH, kFormatS, kFormatD};
DCHECK_LT(GetNEONFormat(format_map), arraysize(vform));
return vform[GetNEONFormat(format_map)];
}
const char* NEONFormatDecoder::GetSubstitute(int index, SubstitutionMode mode) {
if (mode == kFormat) {
return NEONFormatAsString(GetNEONFormat(formats_[index]));
}
DCHECK_EQ(mode, kPlaceholder);
return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
}
NEONFormat NEONFormatDecoder::GetNEONFormat(const NEONFormatMap* format_map) {
return format_map->map[PickBits(format_map->bits)];
}
const char* NEONFormatDecoder::NEONFormatAsString(NEONFormat format) {
static const char* formats[] = {"undefined", "8b", "16b", "4h", "8h",
"2s", "4s", "1d", "2d", "b",
"h", "s", "d"};
DCHECK_LT(format, arraysize(formats));
return formats[format];
}
const char* NEONFormatDecoder::NEONFormatAsPlaceholder(NEONFormat format) {
DCHECK((format == NF_B) || (format == NF_H) || (format == NF_S) ||
(format == NF_D) || (format == NF_UNDEF));
static const char* formats[] = {
"undefined", "undefined", "undefined", "undefined", "undefined",
"undefined", "undefined", "undefined", "undefined", "'B",
"'H", "'S", "'D"};
return formats[format];
}
uint8_t NEONFormatDecoder::PickBits(const uint8_t bits[]) {
uint8_t result = 0;
for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
if (bits[b] == 0) break;
result <<= 1;
result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
}
return result;
}
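// Worked example (a sketch, not part of the original diff): with the integer
// format map's bit list {23, 22, 30}, an instruction with size == 0b01
// (bit 23 = 0, bit 22 = 1) and Q == 1 (bit 30 = 1) concatenates to
//
//   result = (0 << 2) | (1 << 1) | 1;  // 0b011 == 3, indexing NF_8H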
} // namespace internal
} // namespace v8


@ -23,13 +23,17 @@ typedef uint32_t Instr;
// symbol is defined as uint32_t/uint64_t initialized with the desired bit
// pattern. Otherwise, the same symbol is declared as an external float/double.
#if defined(ARM64_DEFINE_FP_STATICS)
#define DEFINE_FLOAT16(name, value) extern const uint16_t name = value
#define DEFINE_FLOAT(name, value) extern const uint32_t name = value
#define DEFINE_DOUBLE(name, value) extern const uint64_t name = value
#else
#define DEFINE_FLOAT16(name, value) extern const float16 name
#define DEFINE_FLOAT(name, value) extern const float name
#define DEFINE_DOUBLE(name, value) extern const double name
#endif // defined(ARM64_DEFINE_FP_STATICS)
DEFINE_FLOAT16(kFP16PositiveInfinity, 0x7c00);
DEFINE_FLOAT16(kFP16NegativeInfinity, 0xfc00);
DEFINE_FLOAT(kFP32PositiveInfinity, 0x7f800000);
DEFINE_FLOAT(kFP32NegativeInfinity, 0xff800000);
DEFINE_DOUBLE(kFP64PositiveInfinity, 0x7ff0000000000000UL);
@ -47,19 +51,14 @@ DEFINE_FLOAT(kFP32QuietNaN, 0x7fc00001);
// The default NaN values (for FPCR.DN=1).
DEFINE_DOUBLE(kFP64DefaultNaN, 0x7ff8000000000000UL);
DEFINE_FLOAT(kFP32DefaultNaN, 0x7fc00000);
DEFINE_FLOAT16(kFP16DefaultNaN, 0x7e00);
#undef DEFINE_FLOAT16
#undef DEFINE_FLOAT
#undef DEFINE_DOUBLE
enum LSDataSize {
LSByte = 0,
LSHalfword = 1,
LSWord = 2,
LSDoubleWord = 3
};
LSDataSize CalcLSPairDataSize(LoadStorePairOp op);
unsigned CalcLSDataSize(LoadStoreOp op);
unsigned CalcLSPairDataSize(LoadStorePairOp op);
enum ImmBranchType {
UnknownBranchType = 0,
@ -82,9 +81,10 @@ enum FPRounding {
FPNegativeInfinity = 0x2,
FPZero = 0x3,
// The final rounding mode is only available when explicitly specified by the
// instruction (such as with fcvta). It cannot be set in FPCR.
FPTieAway
// The final rounding modes are only available when explicitly specified by
// the instruction (such as with fcvta). They cannot be set in FPCR.
FPTieAway,
FPRoundOdd
};
enum Reg31Mode {
@ -152,14 +152,29 @@ class Instruction {
}
uint64_t ImmLogical();
unsigned ImmNEONabcdefgh() const;
float ImmFP32();
double ImmFP64();
float ImmNEONFP32() const;
double ImmNEONFP64() const;
LSDataSize SizeLSPair() const {
unsigned SizeLS() const {
return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
}
unsigned SizeLSPair() const {
return CalcLSPairDataSize(
static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
}
int NEONLSIndex(int access_size_shift) const {
int q = NEONQ();
int s = NEONS();
int size = NEONLSSize();
int index = (q << 3) | (s << 2) | size;
return index >> access_size_shift;
}
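// Worked example (a sketch, not part of the original diff; the field values
// follow the ARM single-structure load/store encoding): a halfword-lane
// access with Q == 1, S == 0 and size == 0b10 gives
// (1 << 3) | (0 << 2) | 2 == 10, and access_size_shift == 1 yields lane
// index 10 >> 1 == 5.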
// Helpers.
bool IsCondBranchImm() const {
return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
@ -181,6 +196,33 @@ class Instruction {
return BranchType() != UnknownBranchType;
}
static float Imm8ToFP32(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint32_t bit7 = (bits >> 7) & 0x1;
uint32_t bit6 = (bits >> 6) & 0x1;
uint32_t bit5_to_0 = bits & 0x3f;
uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
return bit_cast<float>(result);
}
static double Imm8ToFP64(uint32_t imm8) {
// Imm8: abcdefgh (8 bits)
// Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
// 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
// where B is b ^ 1
uint32_t bits = imm8;
uint64_t bit7 = (bits >> 7) & 0x1;
uint64_t bit6 = (bits >> 6) & 0x1;
uint64_t bit5_to_0 = bits & 0x3f;
uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
return bit_cast<double>(result);
}
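// Worked example (a sketch, not part of the original diff): imm8 == 0x70
// (a == 0, b == 1, cdefgh == 110000) is the FMOV (immediate) encoding of +1.0:
//
//   Imm8ToFP32(0x70);  // bit_cast<float>(0x3F800000) == 1.0f
//   Imm8ToFP64(0x70);  // bit_cast<double>(0x3FF0000000000000) == 1.0
//   Imm8ToFP32(0x00);  // 2.0f, since B == b ^ 1 sets the exponent top bit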
bool IsLdrLiteral() const {
return Mask(LoadLiteralFMask) == LoadLiteralFixed;
}
@ -300,7 +342,6 @@ class Instruction {
return ImmTestBranch_width;
default:
UNREACHABLE();
return 0;
}
}
@ -417,6 +458,48 @@ class Instruction {
void SetBranchImmTarget(Instruction* target);
};
// Functions for handling NEON vector format information.
enum VectorFormat {
kFormatUndefined = 0xffffffff,
kFormat8B = NEON_8B,
kFormat16B = NEON_16B,
kFormat4H = NEON_4H,
kFormat8H = NEON_8H,
kFormat2S = NEON_2S,
kFormat4S = NEON_4S,
kFormat1D = NEON_1D,
kFormat2D = NEON_2D,
// Scalar formats. We add the scalar bit to distinguish between scalar and
// vector enumerations; the bit is always set in the encoding of scalar ops
// and always clear for vector ops. Although kFormatD and kFormat1D appear
// to be the same, their meaning is subtly different. The first is a scalar
// operation, the second a vector operation that only affects one lane.
kFormatB = NEON_B | NEONScalar,
kFormatH = NEON_H | NEONScalar,
kFormatS = NEON_S | NEONScalar,
kFormatD = NEON_D | NEONScalar
};
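// Illustration (a sketch, not part of the original diff) of the scalar-bit
// distinction described above: both formats describe a single 64-bit lane,
// but only kFormat1D is a vector format.
//
//   IsVectorFormat(kFormat1D);  // true: a vector op touching one lane
//   IsVectorFormat(kFormatD);   // false: a scalar op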
VectorFormat VectorFormatHalfWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleWidth(VectorFormat vform);
VectorFormat VectorFormatDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatHalfLanes(VectorFormat vform);
VectorFormat ScalarFormatFromLaneSize(int lanesize);
VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform);
VectorFormat VectorFormatFillQ(VectorFormat vform);
VectorFormat ScalarFormatFromFormat(VectorFormat vform);
unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
int LaneSizeInBytesFromFormat(VectorFormat vform);
unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
int LaneCountFromFormat(VectorFormat vform);
int MaxLaneCountFromFormat(VectorFormat vform);
bool IsVectorFormat(VectorFormat vform);
int64_t MaxIntFromFormat(VectorFormat vform);
int64_t MinIntFromFormat(VectorFormat vform);
uint64_t MaxUintFromFormat(VectorFormat vform);
// Where Instruction looks at instructions generated by the Assembler,
// InstructionSequence looks at instructions sequences generated by the
@ -504,7 +587,7 @@ const unsigned kDebugMessageOffset = 3 * kInstructionSize;
//
// For example:
//
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_FP_REGS);
// __ debug("print registers and fp registers", 0, LOG_REGS | LOG_VREGS);
// will print the registers and fp registers only once.
//
// __ debug("trace disasm", 1, TRACE_ENABLE | LOG_DISASM);
@ -517,24 +600,201 @@ const unsigned kDebugMessageOffset = 3 * kInstructionSize;
// stops tracing the registers.
const unsigned kDebuggerTracingDirectivesMask = 3 << 6;
enum DebugParameters {
NO_PARAM = 0,
BREAK = 1 << 0,
LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
LOG_REGS = 1 << 2, // Log general purpose registers.
LOG_FP_REGS = 1 << 3, // Log floating-point registers.
LOG_SYS_REGS = 1 << 4, // Log the status flags.
LOG_WRITE = 1 << 5, // Log any memory write.
NO_PARAM = 0,
BREAK = 1 << 0,
LOG_DISASM = 1 << 1, // Use only with TRACE. Disassemble the code.
LOG_REGS = 1 << 2, // Log general purpose registers.
LOG_VREGS = 1 << 3, // Log NEON and floating-point registers.
LOG_SYS_REGS = 1 << 4, // Log the status flags.
LOG_WRITE = 1 << 5, // Log any memory write.
LOG_STATE = LOG_REGS | LOG_FP_REGS | LOG_SYS_REGS,
LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
LOG_NONE = 0,
LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYS_REGS,
LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE,
// Trace control.
TRACE_ENABLE = 1 << 6,
TRACE_DISABLE = 2 << 6,
TRACE_ENABLE = 1 << 6,
TRACE_DISABLE = 2 << 6,
TRACE_OVERRIDE = 3 << 6
};
enum NEONFormat {
NF_UNDEF = 0,
NF_8B = 1,
NF_16B = 2,
NF_4H = 3,
NF_8H = 4,
NF_2S = 5,
NF_4S = 6,
NF_1D = 7,
NF_2D = 8,
NF_B = 9,
NF_H = 10,
NF_S = 11,
NF_D = 12
};
static const unsigned kNEONFormatMaxBits = 6;
struct NEONFormatMap {
// The bit positions in the instruction to consider.
uint8_t bits[kNEONFormatMaxBits];
// Mapping from concatenated bits to format.
NEONFormat map[1 << kNEONFormatMaxBits];
};
class NEONFormatDecoder {
public:
enum SubstitutionMode { kPlaceholder, kFormat };
// Construct a format decoder with increasingly specific format maps for each
// substitution. If no format map is specified, the default is the integer
// format map.
explicit NEONFormatDecoder(const Instruction* instr);
NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format);
NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
const NEONFormatMap* format1);
NEONFormatDecoder(const Instruction* instr, const NEONFormatMap* format0,
const NEONFormatMap* format1, const NEONFormatMap* format2);
// Set the format mapping for all or individual substitutions.
void SetFormatMaps(const NEONFormatMap* format0,
const NEONFormatMap* format1 = NULL,
const NEONFormatMap* format2 = NULL);
void SetFormatMap(unsigned index, const NEONFormatMap* format);
// Substitute %s in the input string with the placeholder string for each
// register, i.e. "'B", "'H", etc.
const char* SubstitutePlaceholders(const char* string);
// Substitute %s in the input string with a new string based on the
// substitution mode.
const char* Substitute(const char* string, SubstitutionMode mode0 = kFormat,
SubstitutionMode mode1 = kFormat,
SubstitutionMode mode2 = kFormat);
// Append a "2" to a mnemonic string based of the state of the Q bit.
const char* Mnemonic(const char* mnemonic);
VectorFormat GetVectorFormat(int format_index = 0);
VectorFormat GetVectorFormat(const NEONFormatMap* format_map);
// Built in mappings for common cases.
// The integer format map uses three bits (Q, size<1:0>) to encode the
// "standard" set of NEON integer vector formats.
static const NEONFormatMap* IntegerFormatMap() {
static const NEONFormatMap map = {
{23, 22, 30},
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}};
return &map;
}
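// Worked example (illustrative): for an instruction with size<1:0> == 01
// (bit 23 == 0, bit 22 == 1) and Q == 1 (bit 30), the concatenated index is
// 0b011 == 3, which selects NF_8H from the map above.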
// The long integer format map uses two bits (size<1:0>) to encode the
// long set of NEON integer vector formats. These are used in narrow, wide
// and long operations.
static const NEONFormatMap* LongIntegerFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_8H, NF_4S, NF_2D}};
return &map;
}
// The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
// formats: NF_2S, NF_4S, NF_2D.
static const NEONFormatMap* FPFormatMap() {
// The FP format map assumes two bits (Q, size<0>) are used to encode the
// NEON FP vector formats: NF_2S, NF_4S, NF_2D.
static const NEONFormatMap map = {{22, 30},
{NF_2S, NF_4S, NF_UNDEF, NF_2D}};
return &map;
}
// The load/store format map uses three bits (Q, 11, 10) to encode the
// set of NEON vector formats.
static const NEONFormatMap* LoadStoreFormatMap() {
static const NEONFormatMap map = {
{11, 10, 30},
{NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}};
return &map;
}
// The logical format map uses one bit (Q) to encode the NEON vector format:
// NF_8B, NF_16B.
static const NEONFormatMap* LogicalFormatMap() {
static const NEONFormatMap map = {{30}, {NF_8B, NF_16B}};
return &map;
}
// The triangular format map uses between two and five bits to encode the NEON
// vector format:
// xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
// x1000->2S, x1001->4S, 10001->2D, all others undefined.
static const NEONFormatMap* TriangularFormatMap() {
static const NEONFormatMap map = {
{19, 18, 17, 16, 30},
{NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_UNDEF, NF_2D, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B,
NF_2S, NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B}};
return &map;
}
// The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
// formats: NF_B, NF_H, NF_S, NF_D.
static const NEONFormatMap* ScalarFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_B, NF_H, NF_S, NF_D}};
return &map;
}
// The long scalar format map uses two bits (size<1:0>) to encode the longer
// NEON scalar formats: NF_H, NF_S, NF_D.
static const NEONFormatMap* LongScalarFormatMap() {
static const NEONFormatMap map = {{23, 22}, {NF_H, NF_S, NF_D}};
return &map;
}
// The FP scalar format map assumes one bit (size<0>) is used to encode the
// NEON FP scalar formats: NF_S, NF_D.
static const NEONFormatMap* FPScalarFormatMap() {
static const NEONFormatMap map = {{22}, {NF_S, NF_D}};
return &map;
}
// The triangular scalar format map uses between one and four bits to encode
// the NEON FP scalar formats:
// xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
static const NEONFormatMap* TriangularScalarFormatMap() {
static const NEONFormatMap map = {
{19, 18, 17, 16},
{NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B, NF_D, NF_B, NF_H,
NF_B, NF_S, NF_B, NF_H, NF_B}};
return &map;
}
private:
// Get a pointer to a string that represents the format or placeholder for
// the specified substitution index, based on the format map and instruction.
const char* GetSubstitute(int index, SubstitutionMode mode);
// Get the NEONFormat enumerated value for bits obtained from the
// instruction based on the specified format mapping.
NEONFormat GetNEONFormat(const NEONFormatMap* format_map);
// Convert a NEONFormat into a string.
static const char* NEONFormatAsString(NEONFormat format);
// Convert a NEONFormat into a register placeholder string.
static const char* NEONFormatAsPlaceholder(NEONFormat format);
// Select bits from instrbits_ defined by the bits array, concatenate them,
// and return the value.
uint8_t PickBits(const uint8_t bits[]);
Instr instrbits_;
const NEONFormatMap* formats_[3];
char form_buffer_[64];
char mne_buffer_[16];
};
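// Usage sketch (illustrative, not part of the patch; Format() stands in for
// the disassembler's printing helper):
//
//   NEONFormatDecoder nfd(instr);  // defaults to the integer format map
//   // With Q == 1 and size<1:0> == 10 each %s becomes "4s", producing
//   // "add 'Vd.4s, 'Vn.4s, 'Vm.4s".
//   Format(instr, "add", nfd.Substitute("'Vd.%s, 'Vn.%s, 'Vm.%s"));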
} // namespace internal
} // namespace v8

View File

@ -377,7 +377,7 @@ void Instrument::InstrumentLoadStore(Instruction* instr) {
static Counter* load_fp_counter = GetCounter("Load FP");
static Counter* store_fp_counter = GetCounter("Store FP");
switch (instr->Mask(LoadStoreOpMask)) {
switch (instr->Mask(LoadStoreMask)) {
case STRB_w: // Fall through.
case STRH_w: // Fall through.
case STR_w: // Fall through.
@ -595,6 +595,159 @@ void Instrument::VisitFPFixedPointConvert(Instruction* instr) {
counter->Increment();
}
void Instrument::VisitNEON2RegMisc(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3Different(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEON3Same(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONAcrossLanes(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONByIndexedElement(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONCopy(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONExtract(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreMultiStructPostIndex(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONLoadStoreSingleStructPostIndex(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONModifiedImmediate(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONPerm(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar2RegMisc(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3Diff(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalar3Same(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarByIndexedElement(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarCopy(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarPairwise(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONScalarShiftImmediate(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONShiftImmediate(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitNEONTable(Instruction* instr) {
USE(instr);
Update();
static Counter* counter = GetCounter("NEON");
counter->Increment();
}
void Instrument::VisitUnallocated(Instruction* instr) {
Update();

View File

@ -49,6 +49,8 @@ const Register StoreTransitionDescriptor::MapRegister() { return x5; }
const Register StringCompareDescriptor::LeftRegister() { return x1; }
const Register StringCompareDescriptor::RightRegister() { return x0; }
const Register StringConcatDescriptor::ArgumentsCountRegister() { return x0; }
const Register ApiGetterDescriptor::HolderRegister() { return x0; }
const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
@ -174,6 +176,16 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments (on the stack, not including receiver)
// x1 : the target to call
// x2 : arguments list (FixedArray)
// x4 : arguments list length (untagged)
Register registers[] = {x1, x0, x2, x4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: target
@ -183,6 +195,34 @@ void CallForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments (on the stack, not including receiver)
// x1 : the target to call
// x2 : the object to spread
Register registers[] = {x1, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 : the target to call
// x2 : the arguments list
Register registers[] = {x1, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments (on the stack, not including receiver)
// x1 : the target to call
// x3 : the new target
// x2 : arguments list (FixedArray)
// x4 : arguments list length (untagged)
Register registers[] = {x1, x3, x0, x2, x4};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: new target
@ -193,6 +233,25 @@ void ConstructForwardVarargsDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithSpreadDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments (on the stack, not including receiver)
// x1 : the target to call
// x3 : the new target
// x2 : the object to spread
Register registers[] = {x1, x3, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1 : the target to call
// x3 : the new target
// x2 : the arguments list
Register registers[] = {x1, x3, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x3: new target
@ -407,8 +466,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
Register registers[] = {
x0, // the value to pass to the generator
x1, // the JSGeneratorObject to resume
x2, // the resume mode (tagged)
x3 // SuspendFlags (tagged)
x2 // the resume mode (tagged)
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

4191
deps/v8/src/arm64/simulator-logic-arm64.cc vendored Normal file

File diff suppressed because it is too large

View File

@ -12,23 +12,78 @@ namespace internal {
#define __ assm->
uint32_t float_sign(float val) {
uint32_t bits = bit_cast<uint32_t>(val);
return unsigned_bitextract_32(31, 31, bits);
}
uint32_t float_exp(float val) {
uint32_t bits = bit_cast<uint32_t>(val);
return unsigned_bitextract_32(30, 23, bits);
}
uint32_t float_mantissa(float val) {
uint32_t bits = bit_cast<uint32_t>(val);
return unsigned_bitextract_32(22, 0, bits);
}
uint32_t double_sign(double val) {
uint64_t bits = bit_cast<uint64_t>(val);
return static_cast<uint32_t>(unsigned_bitextract_64(63, 63, bits));
}
uint32_t double_exp(double val) {
uint64_t bits = bit_cast<uint64_t>(val);
return static_cast<uint32_t>(unsigned_bitextract_64(62, 52, bits));
}
uint64_t double_mantissa(double val) {
uint64_t bits = bit_cast<uint64_t>(val);
return unsigned_bitextract_64(51, 0, bits);
}
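// Worked examples (illustrative): 1.0f is 0x3f800000, so float_sign(1.0f)
// == 0, float_exp(1.0f) == 127 and float_mantissa(1.0f) == 0; 1.5f is
// 0x3fc00000, so float_mantissa(1.5f) == 0x400000.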
float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
uint32_t bits = sign << kFloatExponentBits | exp;
return bit_cast<float>((bits << kFloatMantissaBits) | mantissa);
}
double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
uint64_t bits = sign << kDoubleExponentBits | exp;
return bit_cast<double>((bits << kDoubleMantissaBits) | mantissa);
}
int float16classify(float16 value) {
const uint16_t exponent_max = (1 << kFloat16ExponentBits) - 1;
const uint16_t exponent_mask = exponent_max << kFloat16MantissaBits;
const uint16_t mantissa_mask = (1 << kFloat16MantissaBits) - 1;
const uint16_t exponent = (value & exponent_mask) >> kFloat16MantissaBits;
const uint16_t mantissa = value & mantissa_mask;
if (exponent == 0) {
if (mantissa == 0) {
return FP_ZERO;
}
return FP_SUBNORMAL;
} else if (exponent == exponent_max) {
if (mantissa == 0) {
return FP_INFINITE;
}
return FP_NAN;
}
return FP_NORMAL;
}
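// Worked examples (illustrative): with 5 exponent and 10 mantissa bits,
// 0x0000 classifies as FP_ZERO, 0x0001 as FP_SUBNORMAL, 0x7c00 as
// FP_INFINITE, 0x7e00 as FP_NAN and 0x3c00 (1.0) as FP_NORMAL.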
int CountLeadingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
DCHECK((width == 32) || (width == 64));
int count = 0;
uint64_t bit_test = 1UL << (width - 1);
while ((count < width) && ((bit_test & value) == 0)) {
count++;
bit_test >>= 1;
DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64));
if (value == 0) {
return width;
}
return count;
return base::bits::CountLeadingZeros64(value << (64 - width));
}
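// Worked example (illustrative): CountLeadingZeros(1, 32) shifts the value
// left by 64 - 32 = 32 bits, so CountLeadingZeros64 sees 0x0000000100000000
// and returns 31, the expected 32-bit result.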
int CountLeadingSignBits(int64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
DCHECK((width == 32) || (width == 64));
DCHECK(base::bits::IsPowerOfTwo(width) && (width <= 64));
if (value >= 0) {
return CountLeadingZeros(value, width) - 1;
} else {
@ -38,43 +93,32 @@ int CountLeadingSignBits(int64_t value, int width) {
int CountTrailingZeros(uint64_t value, int width) {
// TODO(jbramley): Optimize this for ARM64 hosts.
DCHECK((width == 32) || (width == 64));
int count = 0;
while ((count < width) && (((value >> count) & 1) == 0)) {
count++;
if (width == 64) {
return static_cast<int>(base::bits::CountTrailingZeros64(value));
}
return count;
return static_cast<int>(base::bits::CountTrailingZeros32(
static_cast<uint32_t>(value & 0xffffffff)));
}
int CountSetBits(uint64_t value, int width) {
// TODO(jbramley): Would it be useful to allow other widths? The
// implementation already supports them.
DCHECK((width == 32) || (width == 64));
if (width == 64) {
return static_cast<int>(base::bits::CountPopulation64(value));
}
return static_cast<int>(base::bits::CountPopulation32(
static_cast<uint32_t>(value & 0xffffffff)));
}
// Mask out unused bits to ensure that they are not counted.
value &= (0xffffffffffffffffUL >> (64-width));
int LowestSetBitPosition(uint64_t value) {
DCHECK_NE(value, 0U);
return CountTrailingZeros(value, 64) + 1;
}
// Add up the set bits.
// The algorithm works by adding pairs of bit fields together iteratively,
// where the size of each bit field doubles each time.
// An example for an 8-bit value:
// Bits: h g f e d c b a
// \ | \ | \ | \ |
// value = h+g f+e d+c b+a
// \ | \ |
// value = h+g+f+e d+c+b+a
// \ |
// value = h+g+f+e+d+c+b+a
value = ((value >> 1) & 0x5555555555555555) + (value & 0x5555555555555555);
value = ((value >> 2) & 0x3333333333333333) + (value & 0x3333333333333333);
value = ((value >> 4) & 0x0f0f0f0f0f0f0f0f) + (value & 0x0f0f0f0f0f0f0f0f);
value = ((value >> 8) & 0x00ff00ff00ff00ff) + (value & 0x00ff00ff00ff00ff);
value = ((value >> 16) & 0x0000ffff0000ffff) + (value & 0x0000ffff0000ffff);
value = ((value >> 32) & 0x00000000ffffffff) + (value & 0x00000000ffffffff);
return static_cast<int>(value);
int HighestSetBitPosition(uint64_t value) {
DCHECK_NE(value, 0U);
return 63 - CountLeadingZeros(value, 64);
}
@ -84,7 +128,7 @@ uint64_t LargestPowerOf2Divisor(uint64_t value) {
int MaskToBit(uint64_t mask) {
DCHECK(CountSetBits(mask, 64) == 1);
DCHECK_EQ(CountSetBits(mask, 64), 1);
return CountTrailingZeros(mask, 64);
}

View File

@ -8,6 +8,7 @@
#include <cmath>
#include "src/arm64/constants-arm64.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
@ -16,40 +17,26 @@ namespace internal {
STATIC_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
STATIC_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
// Floating point representation.
static inline uint32_t float_to_rawbits(float value) {
uint32_t bits = 0;
memcpy(&bits, &value, 4);
return bits;
}
uint32_t float_sign(float val);
uint32_t float_exp(float val);
uint32_t float_mantissa(float val);
uint32_t double_sign(double val);
uint32_t double_exp(double val);
uint64_t double_mantissa(double val);
float float_pack(uint32_t sign, uint32_t exp, uint32_t mantissa);
double double_pack(uint64_t sign, uint64_t exp, uint64_t mantissa);
static inline uint64_t double_to_rawbits(double value) {
uint64_t bits = 0;
memcpy(&bits, &value, 8);
return bits;
}
static inline float rawbits_to_float(uint32_t bits) {
float value = 0.0;
memcpy(&value, &bits, 4);
return value;
}
static inline double rawbits_to_double(uint64_t bits) {
double value = 0.0;
memcpy(&value, &bits, 8);
return value;
}
// An fpclassify() function for 16-bit half-precision floats.
int float16classify(float16 value);
// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);
int LowestSetBitPosition(uint64_t value);
int HighestSetBitPosition(uint64_t value);
uint64_t LargestPowerOf2Divisor(uint64_t value);
int MaskToBit(uint64_t mask);
@ -86,7 +73,7 @@ T ReverseBytes(T value, int block_bytes_log2) {
// NaN tests.
inline bool IsSignallingNaN(double num) {
uint64_t raw = double_to_rawbits(num);
uint64_t raw = bit_cast<uint64_t>(num);
if (std::isnan(num) && ((raw & kDQuietNanMask) == 0)) {
return true;
}
@ -95,13 +82,17 @@ inline bool IsSignallingNaN(double num) {
inline bool IsSignallingNaN(float num) {
uint32_t raw = float_to_rawbits(num);
uint32_t raw = bit_cast<uint32_t>(num);
if (std::isnan(num) && ((raw & kSQuietNanMask) == 0)) {
return true;
}
return false;
}
inline bool IsSignallingNaN(float16 num) {
const uint16_t kFP16QuietNaNMask = 0x0200;
return (float16classify(num) == FP_NAN) && ((num & kFP16QuietNaNMask) == 0);
}
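// Worked example (illustrative): 0x7d00 has an all-ones exponent, a non-zero
// mantissa and a clear quiet bit (0x0200), so it is a signalling NaN; the
// default quiet NaN 0x7e00 has the quiet bit set and is not.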
template <typename T>
inline bool IsQuietNaN(T num) {
@ -112,13 +103,14 @@ inline bool IsQuietNaN(T num) {
// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
DCHECK(std::isnan(num));
return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
return bit_cast<double>(bit_cast<uint64_t>(num) | kDQuietNanMask);
}
inline float ToQuietNaN(float num) {
DCHECK(std::isnan(num));
return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
return bit_cast<float>(bit_cast<uint32_t>(num) |
static_cast<uint32_t>(kSQuietNanMask));
}

View File

@ -6,3 +6,5 @@ clemensh@chromium.org
mtrofin@chromium.org
rossberg@chromium.org
titzer@chromium.org
# COMPONENT: Blink>JavaScript>WebAssembly

View File

@ -4,8 +4,6 @@
#include "src/asmjs/asm-js.h"
#include "src/api-natives.h"
#include "src/api.h"
#include "src/asmjs/asm-names.h"
#include "src/asmjs/asm-parser.h"
#include "src/assert-scope.h"
@ -17,7 +15,8 @@
#include "src/handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/parsing/scanner.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-js.h"
@ -54,12 +53,12 @@ bool IsStdlibMemberValid(Isolate* isolate, Handle<JSReceiver> stdlib,
bool* is_typed_array) {
switch (member) {
case wasm::AsmJsParser::StandardMember::kInfinity: {
Handle<Name> name = isolate->factory()->infinity_string();
Handle<Name> name = isolate->factory()->Infinity_string();
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
return value->IsNumber() && std::isinf(value->Number());
}
case wasm::AsmJsParser::StandardMember::kNaN: {
Handle<Name> name = isolate->factory()->nan_string();
Handle<Name> name = isolate->factory()->NaN_string();
Handle<Object> value = JSReceiver::GetDataProperty(stdlib, name);
return value->IsNaN();
}
@ -105,7 +104,6 @@ bool IsStdlibMemberValid(Isolate* isolate, Handle<JSReceiver> stdlib,
#undef STDLIB_ARRAY_TYPE
}
UNREACHABLE();
return false;
}
void Report(Handle<Script> script, int position, Vector<const char> text,
@ -193,9 +191,11 @@ MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
Zone* compile_zone = info->zone();
Zone translate_zone(info->isolate()->allocator(), ZONE_NAME);
wasm::AsmJsParser parser(info->isolate(), &translate_zone, info->script(),
info->literal()->start_position(),
info->literal()->end_position());
std::unique_ptr<Utf16CharacterStream> stream(ScannerStream::For(
handle(String::cast(info->script()->source())),
info->literal()->start_position(), info->literal()->end_position()));
uintptr_t stack_limit = info->isolate()->stack_guard()->real_climit();
wasm::AsmJsParser parser(&translate_zone, stack_limit, std::move(stream));
if (!parser.Run()) {
DCHECK(!info->isolate()->has_pending_exception());
ReportCompilationFailure(info->script(), parser.failure_location(),
@ -277,7 +277,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
ReportInstantiationFailure(script, position, "Requires standard library");
return MaybeHandle<Object>();
}
int member_id = Smi::cast(stdlib_uses->get(i))->value();
int member_id = Smi::ToInt(stdlib_uses->get(i));
wasm::AsmJsParser::StandardMember member =
static_cast<wasm::AsmJsParser::StandardMember>(member_id);
if (!IsStdlibMemberValid(isolate, stdlib, member,
@ -287,16 +287,6 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
}
}
// Create the ffi object for foreign functions {"": foreign}.
Handle<JSObject> ffi_object;
if (!foreign.is_null()) {
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate->native_context()->object_function(), isolate);
ffi_object = isolate->factory()->NewJSObject(object_function);
JSObject::AddProperty(ffi_object, isolate->factory()->empty_string(),
foreign, NONE);
}
// Check that a valid heap buffer is provided if required.
if (stdlib_use_of_typed_array_present) {
if (memory.is_null()) {
@ -314,8 +304,9 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
wasm::ErrorThrower thrower(isolate, "AsmJs::Instantiate");
MaybeHandle<Object> maybe_module_object =
wasm::SyncInstantiate(isolate, &thrower, module, ffi_object, memory);
wasm::SyncInstantiate(isolate, &thrower, module, foreign, memory);
if (maybe_module_object.is_null()) {
DCHECK(!isolate->has_pending_exception());
thrower.Reset(); // Ensure exceptions do not propagate.
ReportInstantiationFailure(script, position, "Internal wasm failure");
return MaybeHandle<Object>();

View File

@ -11,9 +11,8 @@
#include "src/asmjs/asm-js.h"
#include "src/asmjs/asm-types.h"
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/base/optional.h"
#include "src/objects-inl.h" // TODO(mstarzinger): Temporary cycle breaker.
#include "src/parsing/scanner.h"
#include "src/wasm/wasm-opcodes.h"
@ -68,16 +67,16 @@ namespace wasm {
#define TOK(name) AsmJsScanner::kToken_##name
AsmJsParser::AsmJsParser(Isolate* isolate, Zone* zone, Handle<Script> script,
int start, int end)
AsmJsParser::AsmJsParser(Zone* zone, uintptr_t stack_limit,
std::unique_ptr<Utf16CharacterStream> stream)
: zone_(zone),
module_builder_(new (zone) WasmModuleBuilder(zone)),
return_type_(nullptr),
stack_limit_(isolate->stack_guard()->real_climit()),
stack_limit_(stack_limit),
global_var_info_(zone),
local_var_info_(zone),
failed_(false),
failure_location_(start),
failure_location_(kNoSourcePosition),
stdlib_name_(kTokenNone),
foreign_name_(kTokenNone),
heap_name_(kTokenNone),
@ -89,9 +88,6 @@ AsmJsParser::AsmJsParser(Isolate* isolate, Zone* zone, Handle<Script> script,
pending_label_(0),
global_imports_(zone) {
InitializeStdlibTypes();
Handle<String> source(String::cast(script->source()), isolate);
std::unique_ptr<Utf16CharacterStream> stream(
ScannerStream::For(source, start, end));
scanner_.SetStream(std::move(stream));
}
@ -144,8 +140,8 @@ void AsmJsParser::InitializeStdlibTypes() {
stdlib_fround_ = AsmType::FroundType(zone());
}
FunctionSig* AsmJsParser::ConvertSignature(
AsmType* return_type, const std::vector<AsmType*>& params) {
FunctionSig* AsmJsParser::ConvertSignature(AsmType* return_type,
const ZoneVector<AsmType*>& params) {
FunctionSig::Builder sig_builder(
zone(), !return_type->IsA(AsmType::Void()) ? 1 : 0, params.size());
for (auto param : params) {
@ -215,7 +211,6 @@ wasm::AsmJsParser::VarInfo* AsmJsParser::GetVarInfo(
return &local_var_info_[index];
}
UNREACHABLE();
return nullptr;
}
uint32_t AsmJsParser::VarIndex(VarInfo* info) {
@ -348,9 +343,15 @@ void AsmJsParser::ValidateModule() {
if (info.kind == VarKind::kTable && !info.function_defined) {
FAIL("Undefined function table");
}
if (info.kind == VarKind::kImportedFunction && !info.function_defined) {
// For imported functions without a single call site, we insert a dummy
// import here to preserve the fact that there actually was an import.
FunctionSig* void_void_sig = FunctionSig::Builder(zone(), 0, 0).Build();
module_builder_->AddImport(info.import->function_name, void_void_sig);
}
}
// Add start function to init things.
// Add start function to initialize things.
WasmFunctionBuilder* start = module_builder_->AddFunction();
module_builder_->MarkStartFunction(start);
for (auto& global_import : global_imports_) {
@ -725,9 +726,9 @@ void AsmJsParser::ValidateFunction() {
int start_position = static_cast<int>(scanner_.Position());
current_function_builder_->SetAsmFunctionStartPosition(start_position);
std::vector<AsmType*> params;
CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
ValidateFunctionParams(&params);
std::vector<ValueType> locals;
CachedVector<ValueType> locals(cached_valuetype_vectors_);
ValidateFunctionLocals(params.size(), &locals);
function_temp_locals_offset_ = static_cast<uint32_t>(
@ -787,13 +788,14 @@ void AsmJsParser::ValidateFunction() {
}
// 6.4 ValidateFunction
void AsmJsParser::ValidateFunctionParams(std::vector<AsmType*>* params) {
void AsmJsParser::ValidateFunctionParams(ZoneVector<AsmType*>* params) {
// TODO(bradnelson): Do this differently so that the scanner doesn't need to
// have a state transition that needs knowledge of how the scanner works
// inside.
scanner_.EnterLocalScope();
EXPECT_TOKEN('(');
std::vector<AsmJsScanner::token_t> function_parameters;
CachedVector<AsmJsScanner::token_t> function_parameters(
cached_token_t_vectors_);
while (!failed_ && !Peek(')')) {
if (!scanner_.IsLocal()) {
FAIL("Expected parameter name");
@ -847,8 +849,8 @@ void AsmJsParser::ValidateFunctionParams(std::vector<AsmType*>* params) {
}
// 6.4 ValidateFunction - locals
void AsmJsParser::ValidateFunctionLocals(
size_t param_count, std::vector<ValueType>* locals) {
void AsmJsParser::ValidateFunctionLocals(size_t param_count,
ZoneVector<ValueType>* locals) {
// Local Variables.
while (Peek(TOK(var))) {
scanner_.EnterLocalScope();
@ -1262,7 +1264,7 @@ void AsmJsParser::SwitchStatement() {
Begin(pending_label_);
pending_label_ = 0;
// TODO(bradnelson): Make less weird.
std::vector<int32_t> cases;
CachedVector<int32_t> cases(cached_int_vectors_);
GatherCases(&cases);
EXPECT_TOKEN('{');
size_t count = cases.size() + 1;
@ -1398,7 +1400,6 @@ AsmType* AsmJsParser::Identifier() {
return info->type;
}
UNREACHABLE();
return nullptr;
}
// 6.8.4 CallExpression
@ -1677,7 +1678,7 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
}
} else if (Check('/')) {
AsmType* b;
RECURSEn(b = MultiplicativeExpression());
RECURSEn(b = UnaryExpression());
if (a->IsA(AsmType::DoubleQ()) && b->IsA(AsmType::DoubleQ())) {
current_function_builder_->Emit(kExprF64Div);
a = AsmType::Double();
@ -1695,7 +1696,7 @@ AsmType* AsmJsParser::MultiplicativeExpression() {
}
} else if (Check('%')) {
AsmType* b;
RECURSEn(b = MultiplicativeExpression());
RECURSEn(b = UnaryExpression());
if (a->IsA(AsmType::DoubleQ()) && b->IsA(AsmType::DoubleQ())) {
current_function_builder_->Emit(kExprF64Mod);
a = AsmType::Double();
@ -2014,8 +2015,7 @@ AsmType* AsmJsParser::ValidateCall() {
// both cases we might be seeing the {function_name} for the first time and
// hence allocate a {VarInfo} here, all subsequent uses of the same name then
// need to match the information stored at this point.
// TODO(mstarzinger): Consider using Chromium's base::Optional instead.
std::unique_ptr<TemporaryVariableScope> tmp;
base::Optional<TemporaryVariableScope> tmp;
if (Check('[')) {
RECURSEn(EqualityExpression());
EXPECT_TOKENn('&');
@ -2023,7 +2023,7 @@ AsmType* AsmJsParser::ValidateCall() {
if (!CheckForUnsigned(&mask)) {
FAILn("Expected mask literal");
}
if (!base::bits::IsPowerOfTwo32(mask + 1)) {
if (!base::bits::IsPowerOfTwo(mask + 1)) {
FAILn("Expected power of 2 mask");
}
current_function_builder_->EmitI32Const(mask);
@ -2050,8 +2050,8 @@ AsmType* AsmJsParser::ValidateCall() {
current_function_builder_->EmitI32Const(function_info->index);
current_function_builder_->Emit(kExprI32Add);
// We have to use a temporary for the correct order of evaluation.
tmp.reset(new TemporaryVariableScope(this));
current_function_builder_->EmitSetLocal(tmp.get()->get());
tmp.emplace(this);
current_function_builder_->EmitSetLocal(tmp->get());
// The position of function table calls is after the table lookup.
call_pos = static_cast<int>(scanner_.Position());
} else {
@ -2070,8 +2070,8 @@ AsmType* AsmJsParser::ValidateCall() {
}
// Parse argument list and gather types.
std::vector<AsmType*> param_types;
ZoneVector<AsmType*> param_specific_types(zone());
CachedVector<AsmType*> param_types(cached_asm_type_p_vectors_);
CachedVector<AsmType*> param_specific_types(cached_asm_type_p_vectors_);
EXPECT_TOKENn('(');
while (!failed_ && !Peek(')')) {
AsmType* t;
@ -2149,10 +2149,12 @@ AsmType* AsmJsParser::ValidateCall() {
auto it = function_info->import->cache.find(sig);
if (it != function_info->import->cache.end()) {
index = it->second;
DCHECK(function_info->function_defined);
} else {
index =
module_builder_->AddImport(function_info->import->function_name, sig);
function_info->import->cache[sig] = index;
function_info->function_defined = true;
}
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);
current_function_builder_->EmitWithU32V(kExprCallFunction, index);
@ -2283,7 +2285,7 @@ AsmType* AsmJsParser::ValidateCall() {
}
}
if (function_info->kind == VarKind::kTable) {
current_function_builder_->EmitGetLocal(tmp.get()->get());
current_function_builder_->EmitGetLocal(tmp->get());
current_function_builder_->AddAsmWasmOffset(call_pos, to_number_pos);
current_function_builder_->Emit(kExprCallIndirect);
current_function_builder_->EmitU32V(signature_index);
@ -2420,7 +2422,7 @@ void AsmJsParser::ScanToClosingParenthesis() {
}
}
void AsmJsParser::GatherCases(std::vector<int32_t>* cases) {
void AsmJsParser::GatherCases(ZoneVector<int32_t>* cases) {
size_t start = scanner_.Position();
int depth = 0;
for (;;) {

View File

@ -5,8 +5,8 @@
#ifndef V8_ASMJS_ASM_PARSER_H_
#define V8_ASMJS_ASM_PARSER_H_
#include <memory>
#include <string>
#include <vector>
#include "src/asmjs/asm-scanner.h"
#include "src/asmjs/asm-types.h"
@ -15,6 +15,9 @@
namespace v8 {
namespace internal {
class Utf16CharacterStream;
namespace wasm {
// A custom parser + validator + wasm converter for asm.js:
@ -46,8 +49,8 @@ class AsmJsParser {
typedef std::unordered_set<StandardMember, std::hash<int>> StdlibSet;
explicit AsmJsParser(Isolate* isolate, Zone* zone, Handle<Script> script,
int start, int end);
explicit AsmJsParser(Zone* zone, uintptr_t stack_limit,
std::unique_ptr<Utf16CharacterStream> stream);
bool Run();
const char* failure_message() const { return failure_message_; }
int failure_location() const { return failure_location_; }
@ -105,6 +108,41 @@ class AsmJsParser {
// Helper class to make {TempVariable} safe for nesting.
class TemporaryVariableScope;
template <typename T>
class CachedVectors {
public:
explicit CachedVectors(Zone* zone) : reusable_vectors_(zone) {}
Zone* zone() const { return reusable_vectors_.get_allocator().zone(); }
inline void fill(ZoneVector<T>* vec) {
if (reusable_vectors_.empty()) return;
reusable_vectors_.back().swap(*vec);
reusable_vectors_.pop_back();
vec->clear();
}
inline void reuse(ZoneVector<T>* vec) {
reusable_vectors_.emplace_back(std::move(*vec));
}
private:
ZoneVector<ZoneVector<T>> reusable_vectors_;
};
template <typename T>
class CachedVector final : public ZoneVector<T> {
public:
explicit CachedVector(CachedVectors<T>& cache)
: ZoneVector<T>(cache.zone()), cache_(&cache) {
cache.fill(this);
}
~CachedVector() { cache_->reuse(this); }
private:
CachedVectors<T>* cache_;
};
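// Usage sketch (illustrative, mirroring the call sites in asm-parser.cc):
// constructing a CachedVector borrows a previously recycled backing store
// from its cache, and the destructor returns it, so hot parsing loops avoid
// repeated zone allocations:
//
//   CachedVector<AsmType*> params(cached_asm_type_p_vectors_);
//   params.push_back(AsmType::Double());  // reuses recycled capacity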
Zone* zone_;
AsmJsScanner scanner_;
WasmModuleBuilder* module_builder_;
@ -115,6 +153,11 @@ class AsmJsParser {
ZoneVector<VarInfo> global_var_info_;
ZoneVector<VarInfo> local_var_info_;
CachedVectors<ValueType> cached_valuetype_vectors_{zone_};
CachedVectors<AsmType*> cached_asm_type_p_vectors_{zone_};
CachedVectors<AsmJsScanner::token_t> cached_token_t_vectors_{zone_};
CachedVectors<int32_t> cached_int_vectors_{zone_};
int function_temp_locals_offset_;
int function_temp_locals_used_;
int function_temp_locals_depth_;
@ -267,7 +310,7 @@ class AsmJsParser {
void InitializeStdlibTypes();
FunctionSig* ConvertSignature(AsmType* return_type,
const std::vector<AsmType*>& params);
const ZoneVector<AsmType*>& params);
void ValidateModule(); // 6.1 ValidateModule
void ValidateModuleParameters(); // 6.1 ValidateModule - parameters
@ -281,9 +324,9 @@ class AsmJsParser {
void ValidateExport(); // 6.2 ValidateExport
void ValidateFunctionTable(); // 6.3 ValidateFunctionTable
void ValidateFunction(); // 6.4 ValidateFunction
void ValidateFunctionParams(std::vector<AsmType*>* params);
void ValidateFunctionParams(ZoneVector<AsmType*>* params);
void ValidateFunctionLocals(size_t param_count,
std::vector<ValueType>* locals);
ZoneVector<ValueType>* locals);
void ValidateStatement(); // 6.5 ValidateStatement
void Block(); // 6.5.1 Block
void ExpressionStatement(); // 6.5.2 ExpressionStatement
@ -331,7 +374,7 @@ class AsmJsParser {
// Used as part of {SwitchStatement}. Collects all case labels in the current
// switch-statement, then resets the scanner position. This is one piece that
// makes this parser not be a pure single-pass.
void GatherCases(std::vector<int32_t>* cases);
void GatherCases(ZoneVector<int32_t>* cases);
};
} // namespace wasm

View File

@ -46,6 +46,10 @@ AsmJsScanner::AsmJsScanner()
#undef V
}
// Destructor of unique_ptr<T> requires a complete declaration of T; we only
// want to include the necessary declaration here instead of in the header.
AsmJsScanner::~AsmJsScanner() {}
void AsmJsScanner::SetStream(std::unique_ptr<Utf16CharacterStream> stream) {
stream_ = std::move(stream);
Next();
@ -208,7 +212,6 @@ std::string AsmJsScanner::Name(token_t token) const {
break;
}
UNREACHABLE();
return "{unreachable}";
}
#endif

View File

@ -32,6 +32,8 @@ class V8_EXPORT_PRIVATE AsmJsScanner {
typedef int32_t token_t;
AsmJsScanner();
~AsmJsScanner();
// Pick the stream to parse (must be called before anything else).
void SetStream(std::unique_ptr<Utf16CharacterStream> stream);

View File

@ -69,7 +69,6 @@ bool AsmType::IsA(AsmType* that) {
}
UNREACHABLE();
return that == this;
}
int32_t AsmType::ElementSizeInBytes() {

View File

@ -23,8 +23,6 @@
#include "src/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/s390/assembler-s390-inl.h"
#elif V8_TARGET_ARCH_X87
#include "src/x87/assembler-x87-inl.h"
#else
#error Unknown architecture.
#endif

View File

@ -55,6 +55,7 @@
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/interpreter/interpreter.h"
#include "src/isolate.h"
#include "src/ostreams.h"
#include "src/regexp/jsregexp.h"
#include "src/regexp/regexp-macro-assembler.h"
@ -84,8 +85,6 @@
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X87
#include "src/regexp/x87/regexp-macro-assembler-x87.h" // NOLINT
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
@ -144,8 +143,7 @@ const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// Implementation of AssemblerBase
AssemblerBase::IsolateData::IsolateData(Isolate* isolate)
: serializer_enabled_(isolate->serializer_enabled()),
max_old_generation_size_(isolate->heap()->MaxOldGenerationSize())
: serializer_enabled_(isolate->serializer_enabled())
#if V8_TARGET_ARCH_X64
,
code_range_start_(
@ -267,16 +265,12 @@ unsigned CpuFeatures::dcache_line_size_ = 0;
// 01: code_target: [6-bit pc delta] 01
//
// 10: short_data_record: [6-bit pc delta] 10 followed by
// [6-bit data delta] [2-bit data type tag]
// [8-bit data delta]
//
// 11: long_record [6 bit reloc mode] 11
// followed by pc delta
// followed by optional data depending on type.
//
// 1-bit data type tags, used in short_data_record and data_jump long_record:
// code_target_with_id: 0
// deopt_reason: 1
//
// If a pc delta exceeds 6 bits, it is split into a remainder that fits into
// 6 bits and a part that does not. The latter is encoded as a long record
// with PC_JUMP as pseudo reloc info mode. The former is encoded as part of
@ -292,8 +286,6 @@ unsigned CpuFeatures::dcache_line_size_ = 0;
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
const int kLongTagBits = 6;
const int kShortDataTypeTagBits = 1;
const int kShortDataBits = kBitsPerByte - kShortDataTypeTagBits;
const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
@ -310,14 +302,10 @@ const int kLastChunkTagBits = 1;
const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
const int kCodeWithIdTag = 0;
const int kDeoptReasonTag = 1;
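// Worked example (illustrative, not part of the patch): with kTagBits == 2,
// a CODE_TARGET record whose pc delta is 5 fits the short form and is
// emitted as the single byte (5 << 2) | kCodeTargetTag == 0x15. A
// DEOPT_REASON record is now a short tagged pc byte followed by one plain
// 8-bit data byte, since the 1-bit short data type tag has been removed.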
void RelocInfo::update_wasm_memory_reference(
Isolate* isolate, Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemoryReference(rmode_));
DCHECK_GE(wasm_memory_reference(), old_base);
Address updated_reference = new_base + (wasm_memory_reference() - old_base);
// The reference is not checked here but at runtime. Validity of references
// may change over time.
@ -399,9 +387,8 @@ void RelocInfoWriter::WriteShortTaggedPC(uint32_t pc_delta, int tag) {
*--pos_ = pc_delta << kTagBits | tag;
}
void RelocInfoWriter::WriteShortTaggedData(intptr_t data_delta, int tag) {
*--pos_ = static_cast<byte>(data_delta << kShortDataTypeTagBits | tag);
void RelocInfoWriter::WriteShortData(intptr_t data_delta) {
*--pos_ = static_cast<byte>(data_delta);
}
@ -453,24 +440,10 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteShortTaggedPC(pc_delta, kCodeTargetTag);
DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
} else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
// Use signed delta-encoding for id.
DCHECK_EQ(static_cast<int>(rinfo->data()), rinfo->data());
int id_delta = static_cast<int>(rinfo->data()) - last_id_;
// Check if delta is small enough to fit in a tagged byte.
if (is_intn(id_delta, kShortDataBits)) {
WriteShortTaggedPC(pc_delta, kLocatableTag);
WriteShortTaggedData(id_delta, kCodeWithIdTag);
} else {
// Otherwise, use costly encoding.
WriteModeAndPC(pc_delta, rmode);
WriteIntData(id_delta);
}
last_id_ = static_cast<int>(rinfo->data());
} else if (rmode == RelocInfo::DEOPT_REASON) {
DCHECK(rinfo->data() < (1 << kShortDataBits));
DCHECK(rinfo->data() < (1 << kBitsPerByte));
WriteShortTaggedPC(pc_delta, kLocatableTag);
WriteShortTaggedData(rinfo->data(), kDeoptReasonTag);
WriteShortData(rinfo->data());
} else {
WriteModeAndPC(pc_delta, rmode);
if (RelocInfo::IsComment(rmode)) {
@ -511,16 +484,6 @@ inline void RelocIterator::AdvanceReadPC() {
}
void RelocIterator::AdvanceReadId() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
}
last_id_ += x;
rinfo_.data_ = last_id_;
}
void RelocIterator::AdvanceReadInt() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
@ -554,23 +517,9 @@ void RelocIterator::AdvanceReadLongPCJump() {
rinfo_.pc_ += pc_jump << kSmallPCDeltaBits;
}
inline int RelocIterator::GetShortDataTypeTag() {
return *pos_ & ((1 << kShortDataTypeTagBits) - 1);
}
inline void RelocIterator::ReadShortTaggedId() {
int8_t signed_b = *pos_;
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
last_id_ += signed_b >> kShortDataTypeTagBits;
rinfo_.data_ = last_id_;
}
inline void RelocIterator::ReadShortTaggedData() {
inline void RelocIterator::ReadShortData() {
uint8_t unsigned_b = *pos_;
rinfo_.data_ = unsigned_b >> kShortDataTypeTagBits;
rinfo_.data_ = unsigned_b;
}
@ -592,18 +541,9 @@ void RelocIterator::next() {
} else if (tag == kLocatableTag) {
ReadShortTaggedPC();
Advance();
int data_type_tag = GetShortDataTypeTag();
if (data_type_tag == kCodeWithIdTag) {
if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
ReadShortTaggedId();
return;
}
} else {
DCHECK(data_type_tag == kDeoptReasonTag);
if (SetMode(RelocInfo::DEOPT_REASON)) {
ReadShortTaggedData();
return;
}
if (SetMode(RelocInfo::DEOPT_REASON)) {
ReadShortData();
return;
}
} else {
DCHECK(tag == kDefaultTag);
@ -612,13 +552,7 @@ void RelocIterator::next() {
AdvanceReadLongPCJump();
} else {
AdvanceReadPC();
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
if (SetMode(rmode)) {
AdvanceReadId();
return;
}
Advance(kIntSize);
} else if (RelocInfo::IsComment(rmode)) {
if (RelocInfo::IsComment(rmode)) {
if (SetMode(rmode)) {
AdvanceReadData();
return;
@ -661,7 +595,6 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
end_ = code->relocation_start();
done_ = false;
mode_mask_ = mode_mask;
last_id_ = 0;
byte* sequence = code->FindCodeAgeSequence();
// We get the isolate from the map, because at serialization time
// the code pointer has been cloned and isn't really in heap space.
@ -683,7 +616,6 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
end_ = pos_ - desc.reloc_size;
done_ = false;
mode_mask_ = mode_mask;
last_id_ = 0;
code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
@ -723,8 +655,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "embedded object";
case CODE_TARGET:
return "code target";
case CODE_TARGET_WITH_ID:
return "code target with id";
case CELL:
return "property cell";
case RUNTIME_ENTRY:
@ -772,7 +702,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
return "number_of_modes";
}
return "unknown relocation type";
}
@ -799,9 +728,6 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
Code* code = Code::GetCodeFromTargetAddress(target_address());
os << " (" << Code::Kind2String(code->kind()) << ") ("
<< static_cast<const void*>(target_address()) << ")";
if (rmode_ == CODE_TARGET_WITH_ID) {
os << " (id=" << static_cast<int>(data_) << ")";
}
} else if (IsRuntimeEntry(rmode_) &&
isolate->deoptimizer_data() != NULL) {
// Deoptimization bailouts are stored as runtime entries.
@ -828,7 +754,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case CELL:
Object::VerifyPointer(target_cell());
break;
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
@ -895,7 +820,6 @@ static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
return ExternalReference::BUILTIN_CALL_TRIPLE;
}
UNREACHABLE();
return ExternalReference::BUILTIN_CALL;
}
@ -951,10 +875,8 @@ ExternalReference ExternalReference::interpreter_dispatch_counters(
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
: address_(isolate->get_address_from_id(id)) {}
ExternalReference::ExternalReference(IsolateAddressId id, Isolate* isolate)
: address_(isolate->get_address_from_id(id)) {}
ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
@ -1015,6 +937,13 @@ ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
void ExternalReference::set_redirector(
Isolate* isolate, ExternalReferenceRedirector* redirector) {
// We can't stack them.
DCHECK(isolate->external_reference_redirector() == NULL);
isolate->set_external_reference_redirector(
reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
return ExternalReference(isolate->stress_deopt_count_address());
@ -1393,8 +1322,6 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#elif V8_TARGET_ARCH_S390
function = FUNCTION_ADDR(RegExpMacroAssemblerS390::CheckStackGuardState);
#elif V8_TARGET_ARCH_X87
function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
#else
UNREACHABLE();
#endif
@ -1578,6 +1505,19 @@ ExternalReference ExternalReference::search_string_raw(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
}
ExternalReference ExternalReference::orderedhashmap_gethash_raw(
Isolate* isolate) {
auto f = OrderedHashMap::GetHash;
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
}
template <typename CollectionType, int entrysize>
ExternalReference ExternalReference::orderedhashtable_has_raw(
Isolate* isolate) {
auto f = OrderedHashTable<CollectionType, entrysize>::HasKey;
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
}
ExternalReference ExternalReference::try_internalize_string_function(
Isolate* isolate) {
return ExternalReference(Redirect(
@ -1608,6 +1548,11 @@ ExternalReference::search_string_raw<const uc16, const uint8_t>(Isolate*);
template ExternalReference
ExternalReference::search_string_raw<const uc16, const uc16>(Isolate*);
template ExternalReference
ExternalReference::orderedhashtable_has_raw<OrderedHashMap, 2>(Isolate*);
template ExternalReference
ExternalReference::orderedhashtable_has_raw<OrderedHashSet, 1>(Isolate*);
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
@ -1624,11 +1569,6 @@ ExternalReference ExternalReference::cpu_features() {
return ExternalReference(&CpuFeatures::supported_);
}
ExternalReference ExternalReference::is_tail_call_elimination_enabled_address(
Isolate* isolate) {
return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
}
ExternalReference ExternalReference::promise_hook_or_debug_is_active_address(
Isolate* isolate) {
return ExternalReference(isolate->promise_hook_or_debug_is_active_address());
@ -1959,6 +1899,17 @@ int ConstantPoolBuilder::Emit(Assembler* assm) {
return !empty ? emitted_label_.pos() : 0;
}
HeapObjectRequest::HeapObjectRequest(double heap_number, int offset)
: kind_(kHeapNumber), offset_(offset) {
value_.heap_number = heap_number;
DCHECK(!IsSmiDouble(value_.heap_number));
}
HeapObjectRequest::HeapObjectRequest(CodeStub* code_stub, int offset)
: kind_(kCodeStub), offset_(offset) {
value_.code_stub = code_stub;
DCHECK_NOT_NULL(value_.code_stub);
}
// Platform specific but identical code for all the platforms.
@ -1988,10 +1939,16 @@ void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
void Assembler::DataAlign(int m) {
DCHECK(m >= 2 && base::bits::IsPowerOfTwo32(m));
DCHECK(m >= 2 && base::bits::IsPowerOfTwo(m));
while ((pc_offset() & (m - 1)) != 0) {
db(0);
}
}
void Assembler::RequestHeapObject(HeapObjectRequest request) {
request.set_offset(pc_offset());
heap_object_requests_.push_front(request);
}
} // namespace internal
} // namespace v8

View File

@ -35,11 +35,13 @@
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
#include <forward_list>
#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/deoptimize-reason.h"
#include "src/double.h"
#include "src/globals.h"
#include "src/isolate.h"
#include "src/label.h"
#include "src/log.h"
#include "src/register-configuration.h"
@ -53,6 +55,7 @@ class ApiFunction;
namespace internal {
// Forward declarations.
class Isolate;
class SourcePosition;
class StatsCounter;
@ -69,7 +72,6 @@ class AssemblerBase: public Malloced {
IsolateData(const IsolateData&) = default;
bool serializer_enabled_;
size_t max_old_generation_size_;
#if V8_TARGET_ARCH_X64
Address code_range_start_;
#endif
@ -108,7 +110,6 @@ class AssemblerBase: public Malloced {
} else {
// Embedded constant pool not supported on this architecture.
UNREACHABLE();
return false;
}
}
@ -163,7 +164,6 @@ class AssemblerBase: public Malloced {
friend class ConstantPoolUnavailableScope;
};
// Avoids emitting debug code during the lifetime of this scope object.
class DontEmitDebugCodeScope BASE_EMBEDDED {
public:
@ -324,9 +324,10 @@ class RelocInfo {
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET,
CODE_TARGET_WITH_ID,
EMBEDDED_OBJECT,
// To relocate pointers into the wasm memory embedded in wasm code
// Wasm entries are to relocate pointers into the wasm memory embedded in
// wasm code. Everything after WASM_MEMORY_REFERENCE (inclusive) is not
// GC'ed.
WASM_MEMORY_REFERENCE,
WASM_GLOBAL_REFERENCE,
WASM_MEMORY_SIZE_REFERENCE,
@ -334,7 +335,6 @@ class RelocInfo {
WASM_PROTECTED_INSTRUCTION_LANDING,
CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
RUNTIME_ENTRY,
COMMENT,
@ -373,8 +373,8 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
LAST_CODE_ENUM = CODE_TARGET_WITH_ID,
LAST_GCED_ENUM = WASM_FUNCTION_TABLE_SIZE_REFERENCE,
LAST_CODE_ENUM = CODE_TARGET,
LAST_GCED_ENUM = EMBEDDED_OBJECT,
FIRST_SHAREABLE_RELOC_MODE = CELL,
};
@ -431,7 +431,7 @@ class RelocInfo {
}
static inline bool IsDebugBreakSlot(Mode mode) {
return IsDebugBreakSlotAtPosition(mode) || IsDebugBreakSlotAtReturn(mode) ||
IsDebugBreakSlotAtCall(mode) || IsDebugBreakSlotAtTailCall(mode);
IsDebugBreakSlotAtCall(mode);
}
static inline bool IsDebugBreakSlotAtPosition(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_POSITION;
@ -442,9 +442,6 @@ class RelocInfo {
static inline bool IsDebugBreakSlotAtCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_CALL;
}
static inline bool IsDebugBreakSlotAtTailCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
}
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
@ -619,7 +616,6 @@ class RelocInfo {
#endif
static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
static const int kDataMask = (1 << CODE_TARGET_WITH_ID) | (1 << COMMENT);
static const int kDebugBreakSlotMask = 1 << DEBUG_BREAK_SLOT_AT_POSITION |
1 << DEBUG_BREAK_SLOT_AT_RETURN |
1 << DEBUG_BREAK_SLOT_AT_CALL;
@ -647,8 +643,8 @@ class RelocInfo {
// lower addresses.
class RelocInfoWriter BASE_EMBEDDED {
public:
RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_id_(0) {}
RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc), last_id_(0) {}
RelocInfoWriter() : pos_(NULL), last_pc_(NULL) {}
RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc) {}
byte* pos() const { return pos_; }
byte* last_pc() const { return last_pc_; }
@ -673,7 +669,7 @@ class RelocInfoWriter BASE_EMBEDDED {
inline uint32_t WriteLongPCJump(uint32_t pc_delta);
inline void WriteShortTaggedPC(uint32_t pc_delta, int tag);
inline void WriteShortTaggedData(intptr_t data_delta, int tag);
inline void WriteShortData(intptr_t data_delta);
inline void WriteMode(RelocInfo::Mode rmode);
inline void WriteModeAndPC(uint32_t pc_delta, RelocInfo::Mode rmode);
@ -682,7 +678,6 @@ class RelocInfoWriter BASE_EMBEDDED {
byte* pos_;
byte* last_pc_;
int last_id_;
RelocInfo::Mode last_mode_;
DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
@ -726,13 +721,10 @@ class RelocIterator: public Malloced {
void AdvanceReadLongPCJump();
int GetShortDataTypeTag();
void ReadShortTaggedPC();
void ReadShortTaggedId();
void ReadShortTaggedData();
void ReadShortData();
void AdvanceReadPC();
void AdvanceReadId();
void AdvanceReadInt();
void AdvanceReadData();
@ -748,7 +740,6 @@ class RelocIterator: public Malloced {
RelocInfo rinfo_;
bool done_;
int mode_mask_;
int last_id_;
DISALLOW_COPY_AND_ASSIGN(RelocIterator);
};
@ -836,7 +827,7 @@ class ExternalReference BASE_EMBEDDED {
explicit ExternalReference(StatsCounter* counter);
ExternalReference(Isolate::AddressId id, Isolate* isolate);
ExternalReference(IsolateAddressId id, Isolate* isolate);
explicit ExternalReference(const SCTableReference& table_ref);
@ -1002,15 +993,17 @@ class ExternalReference BASE_EMBEDDED {
template <typename SubjectChar, typename PatternChar>
static ExternalReference search_string_raw(Isolate* isolate);
static ExternalReference orderedhashmap_gethash_raw(Isolate* isolate);
template <typename CollectionType, int entrysize>
static ExternalReference orderedhashtable_has_raw(Isolate* isolate);
static ExternalReference page_flags(Page* page);
static ExternalReference ForDeoptEntry(Address entry);
static ExternalReference cpu_features();
static ExternalReference is_tail_call_elimination_enabled_address(
Isolate* isolate);
static ExternalReference debug_is_active_address(Isolate* isolate);
static ExternalReference debug_hook_on_function_call_address(
Isolate* isolate);
@ -1057,12 +1050,7 @@ class ExternalReference BASE_EMBEDDED {
// This lets you register a function that rewrites all external references.
// Used by the ARM simulator to catch calls to external references.
static void set_redirector(Isolate* isolate,
ExternalReferenceRedirector* redirector) {
// We can't stack them.
DCHECK(isolate->external_reference_redirector() == NULL);
isolate->set_external_reference_redirector(
reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
ExternalReferenceRedirector* redirector);
static ExternalReference stress_deopt_count(Isolate* isolate);
@ -1156,8 +1144,10 @@ class ConstantPoolEntry {
: position_(position),
merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
value_(value) {}
ConstantPoolEntry(int position, double value)
: position_(position), merged_index_(SHARING_ALLOWED), value64_(value) {}
ConstantPoolEntry(int position, Double value)
: position_(position),
merged_index_(SHARING_ALLOWED),
value64_(value.AsUint64()) {}
int position() const { return position_; }
bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
@ -1167,6 +1157,7 @@ class ConstantPoolEntry {
return merged_index_;
}
void set_merged_index(int index) {
DCHECK(sharing_ok());
merged_index_ = index;
DCHECK(is_merged());
}
@ -1179,7 +1170,7 @@ class ConstantPoolEntry {
merged_index_ = offset;
}
intptr_t value() const { return value_; }
uint64_t value64() const { return bit_cast<uint64_t>(value64_); }
uint64_t value64() const { return value64_; }
enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };
@ -1194,7 +1185,7 @@ class ConstantPoolEntry {
int merged_index_;
union {
intptr_t value_;
double value64_;
uint64_t value64_;
};
enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};
@ -1215,11 +1206,16 @@ class ConstantPoolBuilder BASE_EMBEDDED {
}
// Add double constant to the embedded constant pool
ConstantPoolEntry::Access AddEntry(int position, double value) {
ConstantPoolEntry::Access AddEntry(int position, Double value) {
ConstantPoolEntry entry(position, value);
return AddEntry(entry, ConstantPoolEntry::DOUBLE);
}
// Add double constant to the embedded constant pool
ConstantPoolEntry::Access AddEntry(int position, double value) {
return AddEntry(position, Double(value));
}
// Previews the access type required for the next new entry to be added.
ConstantPoolEntry::Access NextAccess(ConstantPoolEntry::Type type) const;
@ -1265,6 +1261,46 @@ class ConstantPoolBuilder BASE_EMBEDDED {
PerTypeEntryInfo info_[ConstantPoolEntry::NUMBER_OF_TYPES];
};
class HeapObjectRequest {
public:
explicit HeapObjectRequest(double heap_number, int offset = -1);
explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
enum Kind { kHeapNumber, kCodeStub };
Kind kind() const { return kind_; }
double heap_number() const {
DCHECK_EQ(kind(), kHeapNumber);
return value_.heap_number;
}
CodeStub* code_stub() const {
DCHECK_EQ(kind(), kCodeStub);
return value_.code_stub;
}
// The code buffer offset at the time of the request.
int offset() const {
DCHECK_GE(offset_, 0);
return offset_;
}
void set_offset(int offset) {
DCHECK_LT(offset_, 0);
offset_ = offset;
DCHECK_GE(offset_, 0);
}
private:
Kind kind_;
union {
double heap_number;
CodeStub* code_stub;
} value_;
int offset_;
};
} // namespace internal
} // namespace v8
#endif // V8_ASSEMBLER_H_
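
The new HeapObjectRequest class lets the assembler defer heap allocation: at emit time it only records what will be needed (a heap number or a code stub) plus the code-buffer offset to patch, and the requests are resolved once assembly is done. A much simplified standalone sketch of this record-then-patch pattern follows; the emit_heap_number name and the printed patching step are illustrative, not V8 API:

#include <cstdio>
#include <vector>

// Simplified stand-in mirroring HeapObjectRequest: remember what to
// allocate and where in the code buffer the placeholder lives.
struct Request {
  double heap_number;  // what to allocate (only heap numbers in this sketch)
  int offset;          // code-buffer offset recorded at emit time
};

int main() {
  std::vector<Request> requests;

  // Emit phase: record a request instead of allocating mid-assembly.
  auto emit_heap_number = [&](double value, int pc_offset) {
    requests.push_back(Request{value, pc_offset});
  };
  emit_heap_number(1.5, 8);
  emit_heap_number(2.5, 24);

  // After assembly: allocate for real and patch each recorded offset.
  for (const Request& r : requests) {
    std::printf("allocate HeapNumber(%g), patch code at offset %d\n",
                r.heap_number, r.offset);
  }
}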

View File

@ -8,3 +8,5 @@ mstarzinger@chromium.org
neis@chromium.org
rossberg@chromium.org
verwaest@chromium.org
# COMPONENT: Blink>JavaScript>Language

View File

@ -265,12 +265,20 @@ void AstExpressionRewriter::VisitAssignment(Assignment* node) {
AST_REWRITE_PROPERTY(Expression, node, value);
}
void AstExpressionRewriter::VisitSuspend(Suspend* node) {
void AstExpressionRewriter::VisitYield(Yield* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, generator_object);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitYieldStar(YieldStar* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitAwait(Await* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitThrow(Throw* node) {
REWRITE_THIS(node);

View File

@ -8,7 +8,6 @@
#include "src/allocation.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/type-info.h"
#include "src/zone/zone.h"
namespace v8 {

View File

@ -24,9 +24,9 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
properties_(zone),
language_mode_(SLOPPY),
slot_cache_(zone),
disable_crankshaft_reason_(kNoReason),
disable_fullcodegen_reason_(kNoReason),
dont_optimize_reason_(kNoReason),
catch_prediction_(HandlerTable::UNCAUGHT),
dont_self_optimize_(false),
collect_type_profile_(collect_type_profile) {
InitializeAstVisitor(stack_limit);
}
@ -43,6 +43,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void VisitVariableProxyReference(VariableProxy* node);
void VisitPropertyReference(Property* node);
void VisitReference(Expression* expr);
void VisitSuspend(Suspend* node);
void VisitStatementsAndDeclarations(Block* node);
void VisitStatements(ZoneList<Statement*>* statements);
@ -50,23 +51,20 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitLiteralProperty(LiteralProperty* property);
int ReserveIdRange(int n) {
int ReserveId() {
int tmp = next_id_;
next_id_ += n;
next_id_ += 1;
return tmp;
}
void IncrementNodeCount() { properties_.add_node_count(1); }
void DisableSelfOptimization() {
properties_.flags() |= AstProperties::kDontSelfOptimize;
}
void DisableSelfOptimization() { dont_self_optimize_ = true; }
void DisableOptimization(BailoutReason reason) {
dont_optimize_reason_ = reason;
DisableSelfOptimization();
}
void DisableFullCodegenAndCrankshaft(BailoutReason reason) {
disable_crankshaft_reason_ = reason;
properties_.flags() |= AstProperties::kMustUseIgnitionTurbo;
void DisableFullCodegen(BailoutReason reason) {
disable_fullcodegen_reason_ = reason;
}
template <typename Node>
@ -100,9 +98,9 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
LanguageMode language_mode_;
// The slot cache allows us to reuse certain feedback slots.
FeedbackSlotCache slot_cache_;
BailoutReason disable_crankshaft_reason_;
BailoutReason disable_fullcodegen_reason_;
BailoutReason dont_optimize_reason_;
HandlerTable::CatchPrediction catch_prediction_;
bool dont_self_optimize_;
bool collect_type_profile_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@ -140,7 +138,7 @@ void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kDebuggerStatement);
DisableFullCodegen(kDebuggerStatement);
}
@ -148,14 +146,12 @@ void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
IncrementNodeCount();
DisableOptimization(kNativeFunctionLiteral);
node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(DoExpression::num_ids()));
Visit(node->block());
Visit(node->result());
}
@ -163,13 +159,11 @@ void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
void AstNumberingVisitor::VisitLiteral(Literal* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Literal::num_ids()));
}
void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(RegExpLiteral::num_ids()));
ReserveFeedbackSlots(node);
}
@ -178,16 +172,14 @@ void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
IncrementNodeCount();
switch (node->var()->location()) {
case VariableLocation::LOOKUP:
DisableFullCodegenAndCrankshaft(
kReferenceToAVariableWhichRequiresDynamicLookup);
DisableFullCodegen(kReferenceToAVariableWhichRequiresDynamicLookup);
break;
case VariableLocation::MODULE:
DisableFullCodegenAndCrankshaft(kReferenceToModuleVariable);
DisableFullCodegen(kReferenceToModuleVariable);
break;
default:
break;
}
node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node,
@ -203,15 +195,13 @@ void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(ThisFunction::num_ids()));
}
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperPropertyReference::num_ids()));
DisableFullCodegen(kSuperReference);
Visit(node->this_var());
Visit(node->home_object());
}
@ -219,8 +209,7 @@ void AstNumberingVisitor::VisitSuperPropertyReference(
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kSuperReference);
node->set_base_id(ReserveIdRange(SuperCallReference::num_ids()));
DisableFullCodegen(kSuperReference);
Visit(node->this_var());
Visit(node->new_target_var());
Visit(node->this_function_var());
@ -237,30 +226,33 @@ void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
IncrementNodeCount();
Visit(node->expression());
DCHECK(!node->is_async_return() ||
properties_.flags() & AstProperties::kMustUseIgnitionTurbo);
DCHECK(!node->is_async_return() || disable_fullcodegen_reason_ != kNoReason);
}
void AstNumberingVisitor::VisitSuspend(Suspend* node) {
node->set_suspend_id(suspend_count_);
suspend_count_++;
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Suspend::num_ids()));
Visit(node->generator_object());
Visit(node->expression());
}
void AstNumberingVisitor::VisitYield(Yield* node) { VisitSuspend(node); }
void AstNumberingVisitor::VisitYieldStar(YieldStar* node) {
VisitSuspend(node);
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitAwait(Await* node) { VisitSuspend(node); }
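
Suspend ids are handed out in visitation order, and each loop records the counter at entry as its first_suspend_id (see the loop visitors below), so a later phase can tell whether a loop body contains any suspend points. A minimal standalone model of that bookkeeping:

#include <iostream>

// Each suspend point (yield, yield*, await) takes the next sequential id,
// and an enclosing loop snapshots the counter on entry. After the body,
// first_suspend_id == suspend_count exactly when the loop has no suspends.
int main() {
  int suspend_count = 0;

  // while (cond) { await f(); await g(); }
  int first_suspend_id = suspend_count;  // loop->set_first_suspend_id(...)
  int id_await_f = suspend_count++;      // VisitSuspend: id 0
  int id_await_g = suspend_count++;      // VisitSuspend: id 1

  std::cout << "suspend ids: " << id_await_f << ", " << id_await_g
            << "; loop contains suspends: "
            << (first_suspend_id < suspend_count) << '\n';
}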
void AstNumberingVisitor::VisitThrow(Throw* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Throw::num_ids()));
Visit(node->exception());
}
void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(UnaryOperation::num_ids()));
if ((node->op() == Token::TYPEOF) && node->expression()->IsVariableProxy()) {
VariableProxy* proxy = node->expression()->AsVariableProxy();
VisitVariableProxy(proxy, INSIDE_TYPEOF);
@ -272,7 +264,6 @@ void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(CountOperation::num_ids()));
Visit(node->expression());
ReserveFeedbackSlots(node);
}
@ -280,7 +271,6 @@ void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
void AstNumberingVisitor::VisitBlock(Block* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Block::num_ids()));
Scope* scope = node->scope();
if (scope != nullptr) {
LanguageModeScope language_mode_scope(this, scope->language_mode());
@ -306,42 +296,13 @@ void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
VisitArguments(node->arguments());
// To support catch prediction within async/await:
//
// The AstNumberingVisitor is where catch prediction currently occurs, and it
// is the only common point that has access to this information. The parser
// just doesn't know yet. Take the following two cases of catch prediction:
//
// try { await fn(); } catch (e) { }
// try { await fn(); } finally { }
//
// When parsing the await that we want to mark as caught or uncaught, it's
// not yet known whether it will be followed by a 'finally' or a 'catch'.
// The AstNumberingVisitor is what learns whether it is caught. To make
// the information available later to the runtime, the AstNumberingVisitor
// has to stash it somewhere. Changing the runtime function into another
// one in ast-numbering seemed like a simple and straightforward solution to
// that problem.
if (node->is_jsruntime() && catch_prediction_ == HandlerTable::ASYNC_AWAIT) {
switch (node->context_index()) {
case Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX:
node->set_context_index(Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
break;
case Context::ASYNC_GENERATOR_AWAIT_CAUGHT:
node->set_context_index(Context::ASYNC_GENERATOR_AWAIT_UNCAUGHT);
break;
default:
break;
}
}
}
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kWithStatement);
DisableFullCodegen(kWithStatement);
Visit(node->expression());
Visit(node->statement());
}
@ -350,7 +311,7 @@ void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(DoWhileStatement::num_ids()));
node->set_osr_id(ReserveId());
node->set_first_suspend_id(suspend_count_);
Visit(node->body());
Visit(node->cond());
@ -361,7 +322,7 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(WhileStatement::num_ids()));
node->set_osr_id(ReserveId());
node->set_first_suspend_id(suspend_count_);
Visit(node->cond());
Visit(node->body());
@ -372,29 +333,15 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kTryCatchStatement);
{
const HandlerTable::CatchPrediction old_prediction = catch_prediction_;
// This node uses its own prediction, unless it's "uncaught", in which case
// we adopt the prediction of the outer try-block.
HandlerTable::CatchPrediction catch_prediction = node->catch_prediction();
if (catch_prediction != HandlerTable::UNCAUGHT) {
catch_prediction_ = catch_prediction;
}
node->set_catch_prediction(catch_prediction_);
Visit(node->try_block());
catch_prediction_ = old_prediction;
}
DisableFullCodegen(kTryCatchStatement);
Visit(node->try_block());
Visit(node->catch_block());
}
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kTryFinallyStatement);
// We can't know whether the finally block will override ("catch") an
// exception thrown in the try block, so we just adopt the outer prediction.
node->set_catch_prediction(catch_prediction_);
DisableFullCodegen(kTryFinallyStatement);
Visit(node->try_block());
Visit(node->finally_block());
}
@ -402,7 +349,6 @@ void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
void AstNumberingVisitor::VisitPropertyReference(Property* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Property::num_ids()));
Visit(node->key());
Visit(node->obj());
}
@ -426,7 +372,6 @@ void AstNumberingVisitor::VisitProperty(Property* node) {
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Assignment::num_ids()));
if (node->is_compound()) VisitBinaryOperation(node->binary_operation());
VisitReference(node->target());
@ -437,7 +382,6 @@ void AstNumberingVisitor::VisitAssignment(Assignment* node) {
void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(BinaryOperation::num_ids()));
Visit(node->left());
Visit(node->right());
ReserveFeedbackSlots(node);
@ -446,7 +390,6 @@ void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(CompareOperation::num_ids()));
Visit(node->left());
Visit(node->right());
ReserveFeedbackSlots(node);
@ -455,8 +398,7 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
void AstNumberingVisitor::VisitSpread(Spread* node) {
IncrementNodeCount();
// We can only get here from spread calls currently.
DisableFullCodegenAndCrankshaft(kSpreadCall);
node->set_base_id(ReserveIdRange(Spread::num_ids()));
DisableFullCodegen(kSpreadCall);
Visit(node->expression());
}
@ -466,8 +408,7 @@ void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kGetIterator);
node->set_base_id(ReserveIdRange(GetIterator::num_ids()));
DisableFullCodegen(kGetIterator);
Visit(node->iterable());
ReserveFeedbackSlots(node);
}
@ -475,14 +416,14 @@ void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
void AstNumberingVisitor::VisitImportCallExpression(
ImportCallExpression* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kDynamicImport);
DisableFullCodegen(kDynamicImport);
Visit(node->argument());
}
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
node->set_osr_id(ReserveId());
Visit(node->enumerable()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
Visit(node->each());
@ -494,8 +435,8 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kForOfStatement);
node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
DisableFullCodegen(kForOfStatement);
node->set_osr_id(ReserveId());
Visit(node->assign_iterator()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
Visit(node->next_result());
@ -508,7 +449,6 @@ void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
void AstNumberingVisitor::VisitConditional(Conditional* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Conditional::num_ids()));
Visit(node->condition());
Visit(node->then_expression());
Visit(node->else_expression());
@ -517,7 +457,6 @@ void AstNumberingVisitor::VisitConditional(Conditional* node) {
void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(IfStatement::num_ids()));
Visit(node->condition());
Visit(node->then_statement());
if (node->HasElseStatement()) {
@ -528,7 +467,6 @@ void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(SwitchStatement::num_ids()));
Visit(node->tag());
ZoneList<CaseClause*>* cases = node->cases();
for (int i = 0; i < cases->length(); i++) {
@ -539,7 +477,6 @@ void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(CaseClause::num_ids()));
if (!node->is_default()) Visit(node->label());
VisitStatements(node->statements());
ReserveFeedbackSlots(node);
@ -549,7 +486,7 @@ void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
IncrementNodeCount();
DisableSelfOptimization();
node->set_base_id(ReserveIdRange(ForStatement::num_ids()));
node->set_osr_id(ReserveId());
if (node->init() != NULL) Visit(node->init()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
if (node->cond() != NULL) Visit(node->cond());
@ -561,8 +498,7 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kClassLiteral);
node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
DisableFullCodegen(kClassLiteral);
LanguageModeScope language_mode_scope(this, STRICT);
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
@ -578,7 +514,6 @@ void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(node->num_ids()));
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
@ -591,15 +526,13 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
if (node->is_computed_name())
DisableFullCodegenAndCrankshaft(kComputedPropertyName);
if (node->is_computed_name()) DisableFullCodegen(kComputedPropertyName);
Visit(node->key());
Visit(node->value());
}
void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(node->num_ids()));
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
@ -610,11 +543,10 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
void AstNumberingVisitor::VisitCall(Call* node) {
if (node->is_possibly_eval()) {
DisableFullCodegenAndCrankshaft(kFunctionCallsEval);
DisableFullCodegen(kFunctionCallsEval);
}
IncrementNodeCount();
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(Call::num_ids()));
Visit(node->expression());
VisitArguments(node->arguments());
}
@ -623,7 +555,6 @@ void AstNumberingVisitor::VisitCall(Call* node) {
void AstNumberingVisitor::VisitCallNew(CallNew* node) {
IncrementNodeCount();
ReserveFeedbackSlots(node);
node->set_base_id(ReserveIdRange(CallNew::num_ids()));
Visit(node->expression());
VisitArguments(node->arguments());
}
@ -651,7 +582,6 @@ void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(FunctionLiteral::num_ids()));
if (node->ShouldEagerCompile()) {
if (eager_literals_) {
eager_literals_->Add(new (zone())
@ -672,7 +602,6 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
void AstNumberingVisitor::VisitRewritableExpression(
RewritableExpression* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(RewritableExpression::num_ids()));
Visit(node->expression());
}
@ -683,24 +612,24 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
if (scope->new_target_var() != nullptr ||
scope->this_function_var() != nullptr) {
DisableFullCodegenAndCrankshaft(kSuperReference);
DisableFullCodegen(kSuperReference);
}
if (scope->arguments() != nullptr &&
!scope->arguments()->IsStackAllocated()) {
DisableFullCodegenAndCrankshaft(kContextAllocatedArguments);
DisableFullCodegen(kContextAllocatedArguments);
}
if (scope->rest_parameter() != nullptr) {
DisableFullCodegenAndCrankshaft(kRestParameter);
DisableFullCodegen(kRestParameter);
}
if (IsResumableFunction(node->kind())) {
DisableFullCodegenAndCrankshaft(kGenerator);
DisableFullCodegen(kGenerator);
}
if (IsClassConstructor(node->kind())) {
DisableFullCodegenAndCrankshaft(kClassConstructorFunction);
DisableFullCodegen(kClassConstructorFunction);
}
LanguageModeScope language_mode_scope(this, node->language_mode());
@ -716,8 +645,12 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
node->set_dont_optimize_reason(dont_optimize_reason());
node->set_suspend_count(suspend_count_);
if (FLAG_trace_opt && !FLAG_turbo) {
if (disable_crankshaft_reason_ != kNoReason) {
if (dont_self_optimize_) {
node->set_dont_self_optimize();
}
if (disable_fullcodegen_reason_ != kNoReason) {
node->set_must_use_ignition();
if (FLAG_trace_opt && FLAG_stress_fullcodegen) {
// TODO(leszeks): This is a quick'n'dirty fix to allow the debug name of
// the function to be accessed in the below print. This DCHECK will fail
// if we move ast numbering off the main thread, but that won't be before
@ -725,9 +658,9 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
AllowHandleDereference allow_deref;
DCHECK(!node->debug_name().is_null());
PrintF("[enforcing Ignition and TurboFan for %s because: %s\n",
PrintF("[enforcing Ignition for %s because: %s\n",
node->debug_name()->ToCString().get(),
GetBailoutReason(disable_crankshaft_reason_));
GetBailoutReason(disable_fullcodegen_reason_));
}
}

236
deps/v8/src/ast/ast-source-ranges.h vendored Normal file
View File

@ -0,0 +1,236 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_AST_SOURCE_RANGES_H_
#define V8_AST_AST_SOURCE_RANGES_H_
#include "src/ast/ast.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
// Specifies a range within the source code. {start} is 0-based and inclusive,
// {end} is 0-based and exclusive.
struct SourceRange {
SourceRange() : SourceRange(kNoSourcePosition, kNoSourcePosition) {}
SourceRange(int start, int end) : start(start), end(end) {}
bool IsEmpty() const { return start == kNoSourcePosition; }
static SourceRange Empty() { return SourceRange(); }
static SourceRange OpenEnded(int32_t start) {
return SourceRange(start, kNoSourcePosition);
}
static SourceRange ContinuationOf(const SourceRange& that) {
return that.IsEmpty() ? Empty() : OpenEnded(that.end);
}
int32_t start, end;
};
// The list of ast node kinds that have associated source ranges.
#define AST_SOURCE_RANGE_LIST(V) \
V(Block) \
V(CaseClause) \
V(Conditional) \
V(IfStatement) \
V(IterationStatement) \
V(JumpStatement) \
V(SwitchStatement) \
V(Throw) \
V(TryCatchStatement) \
V(TryFinallyStatement)
enum class SourceRangeKind {
kBody,
kCatch,
kContinuation,
kElse,
kFinally,
kThen,
};
class AstNodeSourceRanges : public ZoneObject {
public:
virtual ~AstNodeSourceRanges() {}
virtual SourceRange GetRange(SourceRangeKind kind) = 0;
};
class ContinuationSourceRanges : public AstNodeSourceRanges {
public:
explicit ContinuationSourceRanges(int32_t continuation_position)
: continuation_position_(continuation_position) {}
SourceRange GetRange(SourceRangeKind kind) {
DCHECK(kind == SourceRangeKind::kContinuation);
return SourceRange::OpenEnded(continuation_position_);
}
private:
int32_t continuation_position_;
};
class BlockSourceRanges final : public ContinuationSourceRanges {
public:
explicit BlockSourceRanges(int32_t continuation_position)
: ContinuationSourceRanges(continuation_position) {}
};
class CaseClauseSourceRanges final : public AstNodeSourceRanges {
public:
explicit CaseClauseSourceRanges(const SourceRange& body_range)
: body_range_(body_range) {}
SourceRange GetRange(SourceRangeKind kind) {
DCHECK(kind == SourceRangeKind::kBody);
return body_range_;
}
private:
SourceRange body_range_;
};
class ConditionalSourceRanges final : public AstNodeSourceRanges {
public:
explicit ConditionalSourceRanges(const SourceRange& then_range,
const SourceRange& else_range)
: then_range_(then_range), else_range_(else_range) {}
SourceRange GetRange(SourceRangeKind kind) {
switch (kind) {
case SourceRangeKind::kThen:
return then_range_;
case SourceRangeKind::kElse:
return else_range_;
default:
UNREACHABLE();
}
}
private:
SourceRange then_range_;
SourceRange else_range_;
};
class IfStatementSourceRanges final : public AstNodeSourceRanges {
public:
explicit IfStatementSourceRanges(const SourceRange& then_range,
const SourceRange& else_range)
: then_range_(then_range), else_range_(else_range) {}
SourceRange GetRange(SourceRangeKind kind) {
switch (kind) {
case SourceRangeKind::kElse:
return else_range_;
case SourceRangeKind::kThen:
return then_range_;
case SourceRangeKind::kContinuation: {
const SourceRange& trailing_range =
else_range_.IsEmpty() ? then_range_ : else_range_;
return SourceRange::ContinuationOf(trailing_range);
}
default:
UNREACHABLE();
}
}
private:
SourceRange then_range_;
SourceRange else_range_;
};
class IterationStatementSourceRanges final : public AstNodeSourceRanges {
public:
explicit IterationStatementSourceRanges(const SourceRange& body_range)
: body_range_(body_range) {}
SourceRange GetRange(SourceRangeKind kind) {
switch (kind) {
case SourceRangeKind::kBody:
return body_range_;
case SourceRangeKind::kContinuation:
return SourceRange::ContinuationOf(body_range_);
default:
UNREACHABLE();
}
}
private:
SourceRange body_range_;
};
class JumpStatementSourceRanges final : public ContinuationSourceRanges {
public:
explicit JumpStatementSourceRanges(int32_t continuation_position)
: ContinuationSourceRanges(continuation_position) {}
};
class SwitchStatementSourceRanges final : public ContinuationSourceRanges {
public:
explicit SwitchStatementSourceRanges(int32_t continuation_position)
: ContinuationSourceRanges(continuation_position) {}
};
class ThrowSourceRanges final : public ContinuationSourceRanges {
public:
explicit ThrowSourceRanges(int32_t continuation_position)
: ContinuationSourceRanges(continuation_position) {}
};
class TryCatchStatementSourceRanges final : public AstNodeSourceRanges {
public:
explicit TryCatchStatementSourceRanges(const SourceRange& catch_range)
: catch_range_(catch_range) {}
SourceRange GetRange(SourceRangeKind kind) {
DCHECK(kind == SourceRangeKind::kCatch);
return catch_range_;
}
private:
SourceRange catch_range_;
};
class TryFinallyStatementSourceRanges final : public AstNodeSourceRanges {
public:
explicit TryFinallyStatementSourceRanges(const SourceRange& finally_range)
: finally_range_(finally_range) {}
SourceRange GetRange(SourceRangeKind kind) {
DCHECK(kind == SourceRangeKind::kFinally);
return finally_range_;
}
private:
SourceRange finally_range_;
};
// Maps ast node pointers to associated source ranges. The parser creates these
// mappings and the bytecode generator consumes them.
class SourceRangeMap final : public ZoneObject {
public:
explicit SourceRangeMap(Zone* zone) : map_(zone) {}
AstNodeSourceRanges* Find(AstNode* node) {
auto it = map_.find(node);
if (it == map_.end()) return nullptr;
return it->second;
}
// Type-checked insertion.
#define DEFINE_MAP_INSERT(type) \
void Insert(type* node, type##SourceRanges* ranges) { \
map_.emplace(node, ranges); \
}
AST_SOURCE_RANGE_LIST(DEFINE_MAP_INSERT)
#undef DEFINE_MAP_INSERT
private:
ZoneMap<AstNode*, AstNodeSourceRanges*> map_;
};
#undef AST_SOURCE_RANGE_LIST
} // namespace internal
} // namespace v8
#endif // V8_AST_AST_SOURCE_RANGES_H_
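
Taken together, this header defines half-open ranges ({start} inclusive, {end} exclusive) plus an open-ended "continuation" that begins where a preceding range stops. A standalone sketch exercising exactly the SourceRange arithmetic above (the struct is reproduced from this header; only the surrounding main function is new):

#include <cassert>
#include <cstdint>

const int kNoSourcePosition = -1;  // stand-in for the constant in globals.h

// SourceRange as defined above: {start} inclusive, {end} exclusive.
struct SourceRange {
  SourceRange() : SourceRange(kNoSourcePosition, kNoSourcePosition) {}
  SourceRange(int start, int end) : start(start), end(end) {}
  bool IsEmpty() const { return start == kNoSourcePosition; }
  static SourceRange Empty() { return SourceRange(); }
  static SourceRange OpenEnded(int32_t start) {
    return SourceRange(start, kNoSourcePosition);
  }
  static SourceRange ContinuationOf(const SourceRange& that) {
    return that.IsEmpty() ? Empty() : OpenEnded(that.end);
  }
  int32_t start, end;
};

int main() {
  // Suppose the then-branch of an if-statement spans source offsets [10, 20).
  SourceRange then_range(10, 20);
  // Its continuation (the code that runs after the statement) begins where
  // the trailing branch ends, open-ended until more context is known.
  SourceRange continuation = SourceRange::ContinuationOf(then_range);
  assert(continuation.start == 20 && continuation.end == kNoSourcePosition);
  // A continuation of an empty range is itself empty.
  assert(SourceRange::ContinuationOf(SourceRange::Empty()).IsEmpty());
  return 0;
}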

View File

@ -136,6 +136,9 @@ void AstTraversalVisitor<Subclass>::VisitFunctionDeclaration(
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitBlock(Block* stmt) {
PROCESS_NODE(stmt);
if (stmt->scope() != nullptr) {
RECURSE_EXPRESSION(VisitDeclarations(stmt->scope()->declarations()));
}
RECURSE(VisitStatements(stmt->statements()));
}
@ -357,9 +360,20 @@ void AstTraversalVisitor<Subclass>::VisitAssignment(Assignment* expr) {
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitSuspend(Suspend* expr) {
void AstTraversalVisitor<Subclass>::VisitYield(Yield* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitYieldStar(YieldStar* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->expression()));
}
template <class Subclass>
void AstTraversalVisitor<Subclass>::VisitAwait(Await* expr) {
PROCESS_EXPRESSION(expr);
RECURSE_EXPRESSION(Visit(expr->generator_object()));
RECURSE_EXPRESSION(Visit(expr->expression()));
}

View File

@ -1,40 +0,0 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// A container to associate type bounds with AST Expression nodes.
#ifndef V8_AST_AST_TYPE_BOUNDS_H_
#define V8_AST_AST_TYPE_BOUNDS_H_
#include "src/ast/ast-types.h"
#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
class Expression;
class AstTypeBounds {
public:
explicit AstTypeBounds(Zone* zone) : bounds_map_(zone) {}
~AstTypeBounds() {}
AstBounds get(Expression* expression) const {
ZoneMap<Expression*, AstBounds>::const_iterator i =
bounds_map_.find(expression);
return (i != bounds_map_.end()) ? i->second : AstBounds::Unbounded();
}
void set(Expression* expression, AstBounds bounds) {
bounds_map_[expression] = bounds;
}
private:
ZoneMap<Expression*, AstBounds> bounds_map_;
};
} // namespace internal
} // namespace v8
#endif // V8_AST_AST_TYPE_BOUNDS_H_

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -32,7 +32,7 @@
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/string-hasher.h"
#include "src/utils.h"
#include "src/utils-inl.h"
namespace v8 {
namespace internal {
@ -55,10 +55,10 @@ class OneByteStringStream {
} // namespace
class AstRawStringInternalizationKey : public HashTableKey {
class AstRawStringInternalizationKey : public StringTableKey {
public:
explicit AstRawStringInternalizationKey(const AstRawString* string)
: string_(string) {}
: StringTableKey(string->hash_field()), string_(string) {}
bool IsMatch(Object* other) override {
if (string_->is_one_byte())
@ -67,18 +67,13 @@ class AstRawStringInternalizationKey : public HashTableKey {
Vector<const uint16_t>::cast(string_->literal_bytes_));
}
uint32_t Hash() override { return string_->hash() >> Name::kHashShift; }
uint32_t HashForObject(Object* key) override {
return String::cast(key)->Hash();
}
Handle<Object> AsHandle(Isolate* isolate) override {
Handle<String> AsHandle(Isolate* isolate) override {
if (string_->is_one_byte())
return isolate->factory()->NewOneByteInternalizedString(
string_->literal_bytes_, string_->hash());
string_->literal_bytes_, string_->hash_field());
return isolate->factory()->NewTwoByteInternalizedString(
Vector<const uint16_t>::cast(string_->literal_bytes_), string_->hash());
Vector<const uint16_t>::cast(string_->literal_bytes_),
string_->hash_field());
}
private:
@ -98,9 +93,9 @@ void AstRawString::Internalize(Isolate* isolate) {
bool AstRawString::AsArrayIndex(uint32_t* index) const {
// The StringHasher will set up the hash in such a way that we can use it to
// figure out whether the string is convertible to an array index.
if ((hash_ & Name::kIsNotArrayIndexMask) != 0) return false;
if ((hash_field_ & Name::kIsNotArrayIndexMask) != 0) return false;
if (length() <= Name::kMaxCachedArrayIndexLength) {
*index = Name::ArrayIndexValueBits::decode(hash_);
*index = Name::ArrayIndexValueBits::decode(hash_field_);
} else {
OneByteStringStream stream(literal_bytes_);
CHECK(StringToArrayIndex(&stream, index));
@ -127,7 +122,7 @@ uint16_t AstRawString::FirstCharacter() const {
bool AstRawString::Compare(void* a, void* b) {
const AstRawString* lhs = static_cast<AstRawString*>(a);
const AstRawString* rhs = static_cast<AstRawString*>(b);
DCHECK_EQ(lhs->hash(), rhs->hash());
DCHECK_EQ(lhs->Hash(), rhs->Hash());
if (lhs->length() != rhs->length()) return false;
const unsigned char* l = lhs->raw_data();
@ -205,7 +200,6 @@ bool AstValue::BooleanValue() const {
return false;
}
UNREACHABLE();
return false;
}
@ -253,23 +247,23 @@ AstRawString* AstValueFactory::GetOneByteStringInternal(
if (literal.length() == 1 && IsInRange(literal[0], 'a', 'z')) {
int key = literal[0] - 'a';
if (one_character_strings_[key] == nullptr) {
uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
literal.start(), literal.length(), hash_seed_);
one_character_strings_[key] = GetString(hash, true, literal);
one_character_strings_[key] = GetString(hash_field, true, literal);
}
return one_character_strings_[key];
}
uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>(
literal.start(), literal.length(), hash_seed_);
return GetString(hash, true, literal);
return GetString(hash_field, true, literal);
}
AstRawString* AstValueFactory::GetTwoByteStringInternal(
Vector<const uint16_t> literal) {
uint32_t hash = StringHasher::HashSequentialString<uint16_t>(
uint32_t hash_field = StringHasher::HashSequentialString<uint16_t>(
literal.start(), literal.length(), hash_seed_);
return GetString(hash, false, Vector<const byte>::cast(literal));
return GetString(hash_field, false, Vector<const byte>::cast(literal));
}
@ -385,21 +379,21 @@ const AstValue* AstValueFactory::NewTheHole() {
#undef GENERATE_VALUE_GETTER
AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte,
Vector<const byte> literal_bytes) {
// literal_bytes here points to whatever the user passed, and this is OK
// because we use vector_compare (which checks the contents) to compare
// against the AstRawStrings which are in the string_table_. We should not
// return this AstRawString.
AstRawString key(is_one_byte, literal_bytes, hash);
base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, hash);
AstRawString key(is_one_byte, literal_bytes, hash_field);
base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, key.Hash());
if (entry->value == nullptr) {
// Copy literal contents for later comparison.
int length = literal_bytes.length();
byte* new_literal_bytes = zone_->NewArray<byte>(length);
memcpy(new_literal_bytes, literal_bytes.start(), length);
AstRawString* new_string = new (zone_) AstRawString(
is_one_byte, Vector<const byte>(new_literal_bytes, length), hash);
is_one_byte, Vector<const byte>(new_literal_bytes, length), hash_field);
CHECK_NOT_NULL(new_string);
AddString(new_string);
entry->key = new_string;

View File

@ -64,9 +64,8 @@ class AstRawString final : public ZoneObject {
}
// For storing AstRawStrings in a hash map.
uint32_t hash() const {
return hash_;
}
uint32_t hash_field() const { return hash_field_; }
uint32_t Hash() const { return hash_field_ >> Name::kHashShift; }
// This function can be called after internalizing.
V8_INLINE Handle<String> string() const {
@ -83,10 +82,10 @@ class AstRawString final : public ZoneObject {
// Members accessed only by the AstValueFactory & related classes:
static bool Compare(void* a, void* b);
AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
uint32_t hash)
uint32_t hash_field)
: next_(nullptr),
literal_bytes_(literal_bytes),
hash_(hash),
hash_field_(hash_field),
is_one_byte_(is_one_byte) {}
AstRawString* next() {
DCHECK(!has_string_);
@ -114,7 +113,7 @@ class AstRawString final : public ZoneObject {
};
Vector<const byte> literal_bytes_; // Memory owned by Zone.
uint32_t hash_;
uint32_t hash_field_;
bool is_one_byte_;
#ifdef DEBUG
// (Debug-only:) Verify the object life-cycle: Some functions may only be
@ -203,7 +202,6 @@ class AstValue : public ZoneObject {
if (IsHeapNumber()) return number_;
if (IsSmi()) return smi_;
UNREACHABLE();
return 0;
}
Smi* AsSmi() const {
@ -368,21 +366,21 @@ class AstStringConstants final {
string_table_(AstRawString::Compare),
hash_seed_(hash_seed) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
#define F(name, str) \
{ \
const char* data = str; \
Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
static_cast<int>(strlen(data))); \
uint32_t hash = StringHasher::HashSequentialString<uint8_t>( \
literal.start(), literal.length(), hash_seed_); \
name##_string_ = new (&zone_) AstRawString(true, literal, hash); \
/* The Handle returned by the factory is located on the roots */ \
/* array, not on the temporary HandleScope, so this is safe. */ \
name##_string_->set_string(isolate->factory()->name##_string()); \
base::HashMap::Entry* entry = \
string_table_.InsertNew(name##_string_, name##_string_->hash()); \
DCHECK(entry->value == nullptr); \
entry->value = reinterpret_cast<void*>(1); \
#define F(name, str) \
{ \
const char* data = str; \
Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
static_cast<int>(strlen(data))); \
uint32_t hash_field = StringHasher::HashSequentialString<uint8_t>( \
literal.start(), literal.length(), hash_seed_); \
name##_string_ = new (&zone_) AstRawString(true, literal, hash_field); \
/* The Handle returned by the factory is located on the roots */ \
/* array, not on the temporary HandleScope, so this is safe. */ \
name##_string_->set_string(isolate->factory()->name##_string()); \
base::HashMap::Entry* entry = \
string_table_.InsertNew(name##_string_, name##_string_->Hash()); \
DCHECK_NULL(entry->value); \
entry->value = reinterpret_cast<void*>(1); \
}
STRING_CONSTANTS(F)
#undef F

177
deps/v8/src/ast/ast.cc vendored
View File

@ -23,7 +23,6 @@
#include "src/property-details.h"
#include "src/property.h"
#include "src/string-stream.h"
#include "src/type-info.h"
namespace v8 {
namespace internal {
@ -152,14 +151,12 @@ bool Expression::IsAnonymousFunctionDefinition() const {
AsClassLiteral()->IsAnonymousFunctionDefinition());
}
void Expression::MarkTail() {
if (IsConditional()) {
AsConditional()->MarkTail();
} else if (IsCall()) {
AsCall()->MarkTail();
} else if (IsBinaryOperation()) {
AsBinaryOperation()->MarkTail();
}
bool Expression::IsConciseMethodDefinition() const {
return IsFunctionLiteral() && IsConciseMethod(AsFunctionLiteral()->kind());
}
bool Expression::IsAccessorFunctionDefinition() const {
return IsFunctionLiteral() && IsAccessorFunction(AsFunctionLiteral()->kind());
}
bool Statement::IsJump() const {
@ -193,17 +190,6 @@ VariableProxy::VariableProxy(Variable* var, int start_position)
BindTo(var);
}
VariableProxy::VariableProxy(const AstRawString* name,
VariableKind variable_kind, int start_position)
: Expression(start_position, kVariableProxy),
raw_name_(name),
next_unresolved_(nullptr) {
bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false) |
HoleCheckModeField::encode(HoleCheckMode::kElided);
}
VariableProxy::VariableProxy(const VariableProxy* copy_from)
: Expression(copy_from->position(), kVariableProxy),
next_unresolved_(nullptr) {
@ -396,10 +382,9 @@ void LiteralProperty::SetStoreDataPropertySlot(FeedbackSlot slot) {
}
bool LiteralProperty::NeedsSetFunctionName() const {
return is_computed_name_ &&
(value_->IsAnonymousFunctionDefinition() ||
(value_->IsFunctionLiteral() &&
IsConciseMethod(value_->AsFunctionLiteral()->kind())));
return is_computed_name_ && (value_->IsAnonymousFunctionDefinition() ||
value_->IsConciseMethodDefinition() ||
value_->IsAccessorFunctionDefinition());
}
ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
@ -554,10 +539,11 @@ void ObjectLiteral::InitFlagsForPendingNullPrototype(int i) {
}
}
void ObjectLiteral::InitDepthAndFlags() {
if (is_initialized()) return;
int ObjectLiteral::InitDepthAndFlags() {
if (is_initialized()) return depth();
bool is_simple = true;
bool has_seen_prototype = false;
bool needs_initial_allocation_site = false;
int depth_acc = 1;
uint32_t nof_properties = 0;
uint32_t elements = 0;
@ -584,26 +570,17 @@ void ObjectLiteral::InitDepthAndFlags() {
}
DCHECK(!property->is_computed_name());
MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
if (m_literal != NULL) {
m_literal->InitDepthAndFlags();
if (m_literal->depth() >= depth_acc) depth_acc = m_literal->depth() + 1;
MaterializedLiteral* literal = property->value()->AsMaterializedLiteral();
if (literal != nullptr) {
int subliteral_depth = literal->InitDepthAndFlags() + 1;
if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
needs_initial_allocation_site |= literal->NeedsInitialAllocationSite();
}
const AstValue* key = property->key()->AsLiteral()->raw_value();
Expression* value = property->value();
bool is_compile_time_value = CompileTimeValue::IsCompileTimeValue(value);
// Ensure objects that may, at any point in time, contain fields with double
// representation are always treated as nested objects. This is true for
// computed fields, and smi and double literals.
// TODO(verwaest): Remove once we can store them inline.
if (FLAG_track_double_fields &&
(value->IsNumberLiteral() || !is_compile_time_value)) {
set_may_store_doubles(true);
}
is_simple = is_simple && is_compile_time_value;
// Keep track of the number of elements in the object literal and
@ -622,11 +599,13 @@ void ObjectLiteral::InitDepthAndFlags() {
nof_properties++;
}
set_depth(depth_acc);
set_is_simple(is_simple);
set_needs_initial_allocation_site(needs_initial_allocation_site);
set_has_elements(elements > 0);
set_fast_elements((max_element_index <= 32) ||
((2 * elements) >= max_element_index));
set_has_elements(elements > 0);
set_is_simple(is_simple);
set_depth(depth_acc);
return depth_acc;
}
void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
@ -699,19 +678,14 @@ bool ObjectLiteral::IsFastCloningSupported() const {
// The FastCloneShallowObject builtin doesn't copy elements, and object
// literals don't support copy-on-write (COW) elements for now.
// TODO(mvstanton): make object literals support COW elements.
return fast_elements() && has_shallow_properties() &&
return fast_elements() && is_shallow() &&
properties_count() <=
ConstructorBuiltins::kMaximumClonedShallowObjectProperties;
}
ElementsKind ArrayLiteral::constant_elements_kind() const {
return static_cast<ElementsKind>(constant_elements()->elements_kind());
}
void ArrayLiteral::InitDepthAndFlags() {
int ArrayLiteral::InitDepthAndFlags() {
DCHECK_LT(first_spread_index_, 0);
if (is_initialized()) return;
if (is_initialized()) return depth();
int constants_length = values()->length();
@ -722,12 +696,10 @@ void ArrayLiteral::InitDepthAndFlags() {
for (; array_index < constants_length; array_index++) {
Expression* element = values()->at(array_index);
DCHECK(!element->IsSpread());
MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
if (m_literal != NULL) {
m_literal->InitDepthAndFlags();
if (m_literal->depth() + 1 > depth_acc) {
depth_acc = m_literal->depth() + 1;
}
MaterializedLiteral* literal = element->AsMaterializedLiteral();
if (literal != NULL) {
int subliteral_depth = literal->InitDepthAndFlags() + 1;
if (subliteral_depth > depth_acc) depth_acc = subliteral_depth;
}
if (!CompileTimeValue::IsCompileTimeValue(element)) {
@ -735,8 +707,12 @@ void ArrayLiteral::InitDepthAndFlags() {
}
}
set_is_simple(is_simple);
set_depth(depth_acc);
set_is_simple(is_simple);
// Array literals always need an initial allocation site to properly track
// elements transitions.
set_needs_initial_allocation_site(true);
return depth_acc;
}
void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
@ -782,12 +758,12 @@ void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple() && depth() == 1 && array_index > 0 &&
IsFastSmiOrObjectElementsKind(kind)) {
IsSmiOrObjectElementsKind(kind)) {
fixed_array->set_map(isolate->heap()->fixed_cow_array_map());
}
Handle<FixedArrayBase> elements = fixed_array;
if (IsFastDoubleElementsKind(kind)) {
if (IsDoubleElementsKind(kind)) {
ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
elements = isolate->factory()->NewFixedDoubleArray(constants_length);
// We are copying from non-fast-double to fast-double.
@ -832,6 +808,12 @@ void ArrayLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
}
}
bool MaterializedLiteral::IsSimple() const {
if (IsArrayLiteral()) return AsArrayLiteral()->is_simple();
if (IsObjectLiteral()) return AsObjectLiteral()->is_simple();
DCHECK(IsRegExpLiteral());
return false;
}
Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
Isolate* isolate) {
@ -844,15 +826,22 @@ Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
return isolate->factory()->uninitialized_value();
}
void MaterializedLiteral::InitDepthAndFlags() {
int MaterializedLiteral::InitDepthAndFlags() {
if (IsArrayLiteral()) return AsArrayLiteral()->InitDepthAndFlags();
if (IsObjectLiteral()) return AsObjectLiteral()->InitDepthAndFlags();
DCHECK(IsRegExpLiteral());
return 1;
}
bool MaterializedLiteral::NeedsInitialAllocationSite() {
if (IsArrayLiteral()) {
return AsArrayLiteral()->InitDepthAndFlags();
return AsArrayLiteral()->needs_initial_allocation_site();
}
if (IsObjectLiteral()) {
return AsObjectLiteral()->InitDepthAndFlags();
return AsObjectLiteral()->needs_initial_allocation_site();
}
DCHECK(IsRegExpLiteral());
DCHECK_LE(1, depth()); // Depth should be initialized.
return false;
}
void MaterializedLiteral::BuildConstants(Isolate* isolate) {
@ -865,26 +854,6 @@ void MaterializedLiteral::BuildConstants(Isolate* isolate) {
DCHECK(IsRegExpLiteral());
}
void UnaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
// TODO(olivf) If this Operation is used in a test context, then the
// expression has a ToBoolean stub and we want to collect the type
// information. However the GraphBuilder expects it to be on the instruction
// corresponding to the TestContext, therefore we have to store it here and
// not on the operand.
set_to_boolean_types(oracle->ToBooleanTypes(expression()->test_id()));
}
void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
// TODO(olivf) If this Operation is used in a test context, then the right
// hand side has a ToBoolean stub and we want to collect the type information.
// However the GraphBuilder expects it to be on the instruction corresponding
// to the TestContext, therefore we have to store it here and not on the
// right hand operand.
set_to_boolean_types(oracle->ToBooleanTypes(right()->test_id()));
}
void BinaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
@ -1018,35 +987,6 @@ bool CompareOperation::IsLiteralCompareNull(Expression** expr) {
// ----------------------------------------------------------------------------
// Recording of type feedback
// TODO(rossberg): all RecordTypeFeedback functions should disappear
// once we use the common type field in the AST consistently.
void Expression::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
if (IsUnaryOperation()) {
AsUnaryOperation()->RecordToBooleanTypeFeedback(oracle);
} else if (IsBinaryOperation()) {
AsBinaryOperation()->RecordToBooleanTypeFeedback(oracle);
} else {
set_to_boolean_types(oracle->ToBooleanTypes(test_id()));
}
}
void SmallMapList::AddMapIfMissing(Handle<Map> map, Zone* zone) {
if (!Map::TryUpdate(map).ToHandle(&map)) return;
for (int i = 0; i < length(); ++i) {
if (at(i).is_identical_to(map)) return;
}
Add(map, zone);
}
void SmallMapList::FilterForPossibleTransitions(Map* root_map) {
for (int i = list_.length() - 1; i >= 0; i--) {
if (at(i)->FindRootMap() != root_map) {
list_.RemoveElement(list_.at(i));
}
}
}
Handle<Map> SmallMapList::at(int i) const { return Handle<Map>(list_.at(i)); }
SmallMapList* Expression::GetReceiverTypes() {
@ -1062,7 +1002,6 @@ SmallMapList* Expression::GetReceiverTypes() {
#undef GENERATE_CASE
default:
UNREACHABLE();
return nullptr;
}
}
@ -1075,7 +1014,6 @@ KeyedAccessStoreMode Expression::GetStoreMode() const {
#undef GENERATE_CASE
default:
UNREACHABLE();
return STANDARD_STORE;
}
}
@ -1088,7 +1026,6 @@ IcCheckType Expression::GetKeyType() const {
#undef GENERATE_CASE
default:
UNREACHABLE();
return PROPERTY;
}
}
@ -1102,7 +1039,6 @@ bool Expression::IsMonomorphic() const {
#undef GENERATE_CASE
default:
UNREACHABLE();
return false;
}
}
@ -1141,10 +1077,7 @@ Call::CallType Call::GetCallType() const {
CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
int pos)
: Expression(pos, kCaseClause),
label_(label),
statements_(statements),
compare_type_(AstType::None()) {}
: Expression(pos, kCaseClause), label_(label), statements_(statements) {}
void CaseClause::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
@ -1154,7 +1087,7 @@ void CaseClause::AssignFeedbackSlots(FeedbackVectorSpec* spec,
uint32_t Literal::Hash() {
return raw_value()->IsString()
? raw_value()->AsString()->hash()
? raw_value()->AsString()->Hash()
: ComputeLongHash(double_to_uint64(raw_value()->AsNumber()));
}

1041
deps/v8/src/ast/ast.h vendored

File diff suppressed because it is too large

View File

@ -15,8 +15,9 @@ namespace internal {
bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
if (expression->IsLiteral()) return true;
MaterializedLiteral* lit = expression->AsMaterializedLiteral();
return lit != NULL && lit->is_simple();
MaterializedLiteral* literal = expression->AsMaterializedLiteral();
if (literal == nullptr) return false;
return literal->IsSimple();
}
Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
@ -33,7 +34,7 @@ Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
result->set(kElementsSlot, *object_literal->constant_properties());
} else {
ArrayLiteral* array_literal = expression->AsArrayLiteral();
DCHECK(array_literal != NULL && array_literal->is_simple());
DCHECK(array_literal->is_simple());
result->set(kLiteralTypeSlot, Smi::FromInt(kArrayLiteralFlag));
result->set(kElementsSlot, *array_literal->constant_elements());
}
@ -41,7 +42,7 @@ Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
}
int CompileTimeValue::GetLiteralTypeFlags(Handle<FixedArray> value) {
return Smi::cast(value->get(kLiteralTypeSlot))->value();
return Smi::ToInt(value->get(kLiteralTypeSlot));
}
Handle<HeapObject> CompileTimeValue::GetElements(Handle<FixedArray> value) {

View File

@ -30,8 +30,9 @@ int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
InitializationFlag* init_flag,
MaybeAssignedFlag* maybe_assigned_flag) {
int index = Hash(data, name);
DCHECK(name->IsInternalizedString());
Key& key = keys_[index];
if ((key.data == data) && key.name->Equals(name)) {
if (key.data == data && key.name == name) {
Value result(values_[index]);
if (mode != nullptr) *mode = result.mode();
if (init_flag != nullptr) *init_flag = result.initialization_flag();
@ -46,23 +47,18 @@ void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
VariableMode mode, InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag,
int slot_index) {
DisallowHeapAllocation no_gc;
Handle<String> internalized_name;
DCHECK(slot_index > kNotFound);
if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name)
.ToHandle(&internalized_name)) {
int index = Hash(*data, *internalized_name);
Key& key = keys_[index];
key.data = *data;
key.name = *internalized_name;
// Please note value only takes a uint as index.
values_[index] =
Value(mode, init_flag, maybe_assigned_flag, slot_index - kNotFound)
.raw();
DCHECK(name->IsInternalizedString());
DCHECK_LT(kNotFound, slot_index);
int index = Hash(*data, *name);
Key& key = keys_[index];
key.data = *data;
key.name = *name;
// Please note value only takes a uint as index.
values_[index] =
Value(mode, init_flag, maybe_assigned_flag, slot_index - kNotFound).raw();
#ifdef DEBUG
ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
#endif
}
}
void ContextSlotCache::Clear() {
@ -76,20 +72,16 @@ void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
InitializationFlag init_flag,
MaybeAssignedFlag maybe_assigned_flag,
int slot_index) {
DisallowHeapAllocation no_gc;
Handle<String> internalized_name;
if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name)
.ToHandle(&internalized_name)) {
int index = Hash(*data, *name);
Key& key = keys_[index];
DCHECK(key.data == *data);
DCHECK(key.name->Equals(*name));
Value result(values_[index]);
DCHECK(result.mode() == mode);
DCHECK(result.initialization_flag() == init_flag);
DCHECK(result.maybe_assigned_flag() == maybe_assigned_flag);
DCHECK(result.index() + kNotFound == slot_index);
}
DCHECK(name->IsInternalizedString());
int index = Hash(*data, *name);
Key& key = keys_[index];
DCHECK_EQ(key.data, *data);
DCHECK_EQ(key.name, *name);
Value result(values_[index]);
DCHECK_EQ(result.mode(), mode);
DCHECK_EQ(result.initialization_flag(), init_flag);
DCHECK_EQ(result.maybe_assigned_flag(), maybe_assigned_flag);
DCHECK_EQ(result.index() + kNotFound, slot_index);
}
#endif // DEBUG
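
ContextSlotCache is a direct-mapped cache: a fixed-size table indexed by a hash of (data, name), where an update simply overwrites whatever occupied the bucket, and the simplification in this diff (comparing key.name == name by pointer) is sound only because names are now guaranteed to be internalized. A standalone sketch of the same shape; the pointer-folding hash here is illustrative:

#include <cstdint>
#include <cstdio>

// Direct-mapped cache in the spirit of ContextSlotCache: one entry per
// hash bucket, newer entries overwrite older ones. Keys compare by
// pointer, which works only when each distinct name has one canonical
// (interned) object.
template <typename K, typename V, int kLength = 256>
class DirectMappedCache {
 public:
  bool Lookup(const K* key, V* out) const {
    const Entry& e = entries_[Hash(key)];
    if (e.key != key) return false;  // bucket holds a different key: miss
    *out = e.value;
    return true;
  }
  void Update(const K* key, V value) {
    entries_[Hash(key)] = Entry{key, value};  // overwrite the bucket
  }

 private:
  struct Entry {
    const K* key = nullptr;
    V value{};
  };
  static int Hash(const K* key) {
    // Illustrative hash: fold the pointer value into the table size.
    return static_cast<int>((reinterpret_cast<uintptr_t>(key) >> 4) % kLength);
  }
  Entry entries_[kLength];
};

int main() {
  static const char name[] = "x";  // stands in for one interned name object
  DirectMappedCache<char, int> cache;
  int slot = -1;
  std::printf("hit before update: %d\n", cache.Lookup(name, &slot));  // 0
  cache.Update(name, 3);
  if (cache.Lookup(name, &slot)) std::printf("cached slot: %d\n", slot);  // 3
}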

View File

@ -12,28 +12,33 @@
namespace v8 {
namespace internal {
void ModuleDescriptor::AddImport(
const AstRawString* import_name, const AstRawString* local_name,
const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
void ModuleDescriptor::AddImport(const AstRawString* import_name,
const AstRawString* local_name,
const AstRawString* module_request,
const Scanner::Location loc,
const Scanner::Location specifier_loc,
Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->local_name = local_name;
entry->import_name = import_name;
entry->module_request = AddModuleRequest(module_request);
entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddRegularImport(entry);
}
void ModuleDescriptor::AddStarImport(
const AstRawString* local_name, const AstRawString* module_request,
Scanner::Location loc, Zone* zone) {
void ModuleDescriptor::AddStarImport(const AstRawString* local_name,
const AstRawString* module_request,
const Scanner::Location loc,
const Scanner::Location specifier_loc,
Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->local_name = local_name;
entry->module_request = AddModuleRequest(module_request);
entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddNamespaceImport(entry, zone);
}
void ModuleDescriptor::AddEmptyImport(const AstRawString* module_request) {
AddModuleRequest(module_request);
void ModuleDescriptor::AddEmptyImport(const AstRawString* module_request,
const Scanner::Location specifier_loc) {
AddModuleRequest(module_request, specifier_loc);
}
@ -46,24 +51,27 @@ void ModuleDescriptor::AddExport(
AddRegularExport(entry);
}
void ModuleDescriptor::AddExport(
const AstRawString* import_name, const AstRawString* export_name,
const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
void ModuleDescriptor::AddExport(const AstRawString* import_name,
const AstRawString* export_name,
const AstRawString* module_request,
const Scanner::Location loc,
const Scanner::Location specifier_loc,
Zone* zone) {
DCHECK_NOT_NULL(import_name);
DCHECK_NOT_NULL(export_name);
Entry* entry = new (zone) Entry(loc);
entry->export_name = export_name;
entry->import_name = import_name;
entry->module_request = AddModuleRequest(module_request);
entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddSpecialExport(entry, zone);
}
void ModuleDescriptor::AddStarExport(
const AstRawString* module_request, Scanner::Location loc, Zone* zone) {
void ModuleDescriptor::AddStarExport(const AstRawString* module_request,
const Scanner::Location loc,
const Scanner::Location specifier_loc,
Zone* zone) {
Entry* entry = new (zone) Entry(loc);
entry->module_request = AddModuleRequest(module_request);
entry->module_request = AddModuleRequest(module_request, specifier_loc);
AddSpecialExport(entry, zone);
}
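
Every import/export path now threads a specifier_loc (the source location of the module-specifier string) into AddModuleRequest, so diagnostics about a request can point at './mod' itself rather than at the whole statement. A standalone sketch of deduplicating module requests while remembering a location; that the first occurrence's location is the one kept is this sketch's assumption, not something the diff shows:

#include <iostream>
#include <map>
#include <string>

struct Location {
  int beg, end;  // character offsets of the specifier in the source
};

// One request index per distinct specifier, plus a location for it.
class ModuleRequests {
 public:
  int Add(const std::string& specifier, Location loc) {
    auto result = requests_.emplace(specifier, Info{next_index_, loc});
    if (result.second) ++next_index_;  // new specifier: consume an index
    return result.first->second.index;
  }

 private:
  struct Info {
    int index;
    Location first_loc;  // retained for later diagnostics (assumption)
  };
  std::map<std::string, Info> requests_;
  int next_index_ = 0;
};

int main() {
  ModuleRequests requests;
  // import {a} from './mod';  and later  import {b} from './mod';
  std::cout << requests.Add("./mod", Location{16, 23}) << ' '
            << requests.Add("./mod", Location{42, 49}) << '\n';  // 0 0
}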

Some files were not shown because too many files have changed in this diff