deps: update V8 to 5.8.283.38

PR-URL: https://github.com/nodejs/node/pull/12784
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Gibson Fahnestock <gibfahn@gmail.com>
commit 60d1aac8d2 (parent 73d9c0f903)
Author: Michaël Zasso
Date:   2017-05-02 10:50:00 +02:00
1492 changed files with 76973 additions and 49777 deletions

deps/v8/.gitignore (2 changed lines)

@ -54,7 +54,6 @@ shell_g
/test/promises-aplus/promises-tests
/test/promises-aplus/promises-tests.tar.gz
/test/promises-aplus/sinon
/test/simdjs/data
/test/test262/data
/test/test262/data.tar
/test/test262/harness
@ -102,5 +101,6 @@ v8.ignition_dispatches_table.json
/test/fuzzer/wasm_asmjs.tar.gz
/src/inspector/build/closure-compiler.tar.gz
/src/inspector/build/closure-compiler
/test/wasm-js
!/third_party/jinja2
!/third_party/markupsafe

deps/v8/AUTHORS (2 changed lines)

@ -48,6 +48,7 @@ Bert Belder <bertbelder@gmail.com>
Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Choongwoo Han <cwhan.tunz@gmail.com>
Chris Nardi <hichris123@gmail.com>
Christopher A. Taylor <chris@gameclosure.com>
Daniel Andersson <kodandersson@gmail.com>
@ -64,7 +65,6 @@ Filipe David Manana <fdmanana@gmail.com>
Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Geoffrey Garside <ggarside@gmail.com>
Gwang Yoon Hwang <ryumiel@company100.net>
Han Choongwoo <cwhan.tunz@gmail.com>
Henrique Ferreiro <henrique.ferreiro@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
Honggyu Kim <honggyu.kp@gmail.com>

deps/v8/BUILD.gn (144 changed lines)

@ -30,7 +30,7 @@ declare_args() {
v8_deprecation_warnings = false
# Enable compiler warnings when using V8_DEPRECATE_SOON apis.
v8_imminent_deprecation_warnings = ""
v8_imminent_deprecation_warnings = false
# Embeds the given script into the snapshot.
v8_embed_script = ""
@ -41,12 +41,18 @@ declare_args() {
# Sets -dENABLE_GDB_JIT_INTERFACE.
v8_enable_gdbjit = ""
# Sets -dENABLE_VTUNE_JIT_INTERFACE.
v8_enable_vtunejit = false
# Sets -dENABLE_HANDLE_ZAPPING.
v8_enable_handle_zapping = is_debug
# Enable slow dchecks.
v8_enable_slow_dchecks = false
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
# Interpreted regexp engine exists as platform-independent alternative
# based where the regular expression is compiled to a bytecode.
v8_interpreted_regexp = false
@ -76,23 +82,22 @@ declare_args() {
# Similar to the ARM hard float ABI but on MIPS.
v8_use_mips_abi_hardfloat = true
}
# Set project-specific defaults for some args if not provided in args.gn. The
# defaults can be set in the respective build_overrides files.
if (v8_imminent_deprecation_warnings == "") {
if (defined(v8_imminent_deprecation_warnings_default)) {
v8_imminent_deprecation_warnings = v8_imminent_deprecation_warnings_default
} else {
v8_imminent_deprecation_warnings = false
}
}
if (v8_enable_gdbjit == "") {
if (defined(v8_enable_gdbjit_default)) {
v8_enable_gdbjit = v8_enable_gdbjit_default
} else {
v8_enable_gdbjit = false
}
# List of extra files to snapshot. They will be snapshotted in order so
# if files export symbols used by later files, they should go first.
#
# This default is used by cctests. Projects using V8 will want to override.
v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
# Like v8_extra_library_files but for experimental features.
#
# This default is used by cctests. Projects using V8 will want to override.
v8_experimental_extra_library_files =
[ "//test/cctest/test-experimental-extra.js" ]
v8_enable_gdbjit = ((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
v8_current_cpu == "x87") && (is_linux || is_mac)) ||
(v8_current_cpu == "ppc64" && is_linux)
}
# Derived defaults.
@ -195,6 +200,9 @@ config("features") {
if (v8_enable_gdbjit) {
defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
}
if (v8_enable_vtunejit) {
defines += [ "ENABLE_VTUNE_JIT_INTERFACE" ]
}
if (v8_enable_object_print) {
defines += [ "OBJECT_PRINT" ]
}
@ -381,6 +389,10 @@ config("toolchain") {
defines += [ "DEBUG" ]
}
if (v8_enable_verify_csa) {
defines += [ "ENABLE_VERIFY_CSA" ]
}
if (v8_no_inline) {
cflags += [
"-fno-inline-functions",
@ -395,11 +407,10 @@ config("toolchain") {
# TODO(hans): Remove once http://crbug.com/428099 is resolved.
"-Winconsistent-missing-override",
]
if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
v8_current_cpu == "mips64el") {
cflags += [ "-Wshorten-64-to-32" ]
}
#if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
# v8_current_cpu == "mips64el") {
# cflags += [ "-Wshorten-64-to-32" ]
#}
}
}
@ -437,7 +448,6 @@ action("js2c") {
"src/js/templates.js",
"src/js/spread.js",
"src/js/proxy.js",
"src/js/async-await.js",
"src/js/harmony-string-padding.js",
"src/debug/mirrors.js",
"src/debug/debug.js",
@ -482,20 +492,12 @@ action("js2c_experimental") {
"src/js/macros.py",
"src/messages.h",
"src/js/harmony-atomics.js",
"src/js/harmony-simd.js",
]
outputs = [
"$target_gen_dir/experimental-libraries.cc",
]
if (v8_enable_i18n_support) {
sources += [
"src/js/datetime-format-to-parts.js",
"src/js/icu-case-mapping.js",
]
}
args = [
rebase_path("$target_gen_dir/experimental-libraries.cc",
root_build_dir),
@ -749,6 +751,7 @@ action("v8_dump_build_config") {
]
args = [
rebase_path("$root_out_dir/v8_build_config.json", root_build_dir),
"current_cpu=\"$current_cpu\"",
"dcheck_always_on=$dcheck_always_on",
"is_asan=$is_asan",
"is_cfi=$is_cfi",
@ -757,6 +760,7 @@ action("v8_dump_build_config") {
"is_msan=$is_msan",
"is_tsan=$is_tsan",
"target_cpu=\"$target_cpu\"",
"v8_current_cpu=\"$v8_current_cpu\"",
"v8_enable_i18n_support=$v8_enable_i18n_support",
"v8_enable_inspector=$v8_enable_inspector",
"v8_target_cpu=\"$v8_target_cpu\"",
@ -866,7 +870,7 @@ if (v8_use_external_startup_data) {
# This is split out to be a non-code containing target that the Chromium browser
# DLL can depend upon to get only a version string.
v8_source_set("v8_version") {
v8_header_set("v8_version") {
configs = [ ":internal_config" ]
sources = [
@ -928,8 +932,6 @@ v8_source_set("v8_base") {
"src/ast/ast-expression-rewriter.h",
"src/ast/ast-function-literal-id-reindexer.cc",
"src/ast/ast-function-literal-id-reindexer.h",
"src/ast/ast-literal-reindexer.cc",
"src/ast/ast-literal-reindexer.h",
"src/ast/ast-numbering.cc",
"src/ast/ast-numbering.h",
"src/ast/ast-traversal-visitor.h",
@ -967,8 +969,14 @@ v8_source_set("v8_base") {
"src/bootstrapper.cc",
"src/bootstrapper.h",
"src/builtins/builtins-api.cc",
"src/builtins/builtins-arguments.cc",
"src/builtins/builtins-arguments.h",
"src/builtins/builtins-array.cc",
"src/builtins/builtins-arraybuffer.cc",
"src/builtins/builtins-async-function.cc",
"src/builtins/builtins-async-iterator.cc",
"src/builtins/builtins-async.cc",
"src/builtins/builtins-async.h",
"src/builtins/builtins-boolean.cc",
"src/builtins/builtins-call.cc",
"src/builtins/builtins-callsite.cc",
@ -990,16 +998,19 @@ v8_source_set("v8_base") {
"src/builtins/builtins-math.cc",
"src/builtins/builtins-number.cc",
"src/builtins/builtins-object.cc",
"src/builtins/builtins-object.h",
"src/builtins/builtins-promise.cc",
"src/builtins/builtins-promise.h",
"src/builtins/builtins-proxy.cc",
"src/builtins/builtins-reflect.cc",
"src/builtins/builtins-regexp.cc",
"src/builtins/builtins-regexp.h",
"src/builtins/builtins-sharedarraybuffer.cc",
"src/builtins/builtins-string.cc",
"src/builtins/builtins-symbol.cc",
"src/builtins/builtins-typedarray.cc",
"src/builtins/builtins-utils.h",
"src/builtins/builtins-wasm.cc",
"src/builtins/builtins.cc",
"src/builtins/builtins.h",
"src/cached-powers.cc",
@ -1132,8 +1143,6 @@ v8_source_set("v8_base") {
"src/compiler/js-frame-specialization.h",
"src/compiler/js-generic-lowering.cc",
"src/compiler/js-generic-lowering.h",
"src/compiler/js-global-object-specialization.cc",
"src/compiler/js-global-object-specialization.h",
"src/compiler/js-graph.cc",
"src/compiler/js-graph.h",
"src/compiler/js-inlining-heuristic.cc",
@ -1146,6 +1155,8 @@ v8_source_set("v8_base") {
"src/compiler/js-native-context-specialization.h",
"src/compiler/js-operator.cc",
"src/compiler/js-operator.h",
"src/compiler/js-type-hint-lowering.cc",
"src/compiler/js-type-hint-lowering.h",
"src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h",
"src/compiler/jump-threading.cc",
@ -1324,6 +1335,8 @@ v8_source_set("v8_base") {
"src/dateparser-inl.h",
"src/dateparser.cc",
"src/dateparser.h",
"src/debug/debug-coverage.cc",
"src/debug/debug-coverage.h",
"src/debug/debug-evaluate.cc",
"src/debug/debug-evaluate.h",
"src/debug/debug-frames.cc",
@ -1380,10 +1393,13 @@ v8_source_set("v8_base") {
"src/feedback-vector-inl.h",
"src/feedback-vector.cc",
"src/feedback-vector.h",
"src/ffi/ffi-compiler.cc",
"src/ffi/ffi-compiler.h",
"src/field-index-inl.h",
"src/field-index.h",
"src/field-type.cc",
"src/field-type.h",
"src/find-and-replace-pattern.h",
"src/fixed-dtoa.cc",
"src/fixed-dtoa.h",
"src/flag-definitions.h",
@ -1453,7 +1469,6 @@ v8_source_set("v8_base") {
"src/ic/access-compiler-data.h",
"src/ic/access-compiler.cc",
"src/ic/access-compiler.h",
"src/ic/accessor-assembler-impl.h",
"src/ic/accessor-assembler.cc",
"src/ic/accessor-assembler.h",
"src/ic/call-optimization.cc",
@ -1462,8 +1477,6 @@ v8_source_set("v8_base") {
"src/ic/handler-compiler.h",
"src/ic/handler-configuration-inl.h",
"src/ic/handler-configuration.h",
"src/ic/ic-compiler.cc",
"src/ic/ic-compiler.h",
"src/ic/ic-inl.h",
"src/ic/ic-state.cc",
"src/ic/ic-state.h",
@ -1537,6 +1550,7 @@ v8_source_set("v8_base") {
"src/json-stringifier.h",
"src/keys.cc",
"src/keys.h",
"src/label.h",
"src/layout-descriptor-inl.h",
"src/layout-descriptor.cc",
"src/layout-descriptor.h",
@ -1557,6 +1571,7 @@ v8_source_set("v8_base") {
"src/machine-type.cc",
"src/machine-type.h",
"src/macro-assembler.h",
"src/managed.h",
"src/map-updater.cc",
"src/map-updater.h",
"src/messages.cc",
@ -1569,9 +1584,12 @@ v8_source_set("v8_base") {
"src/objects-printer.cc",
"src/objects.cc",
"src/objects.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
"src/objects/module-info.h",
"src/objects/object-macros-undef.h",
"src/objects/object-macros.h",
"src/objects/regexp-match-info.h",
"src/objects/scope-info.cc",
"src/objects/scope-info.h",
"src/ostreams.cc",
@ -1594,6 +1612,8 @@ v8_source_set("v8_base") {
"src/parsing/preparse-data-format.h",
"src/parsing/preparse-data.cc",
"src/parsing/preparse-data.h",
"src/parsing/preparsed-scope-data.cc",
"src/parsing/preparsed-scope-data.h",
"src/parsing/preparser.cc",
"src/parsing/preparser.h",
"src/parsing/rewriter.cc",
@ -1692,7 +1712,6 @@ v8_source_set("v8_base") {
"src/runtime/runtime-proxy.cc",
"src/runtime/runtime-regexp.cc",
"src/runtime/runtime-scopes.cc",
"src/runtime/runtime-simd.cc",
"src/runtime/runtime-strings.cc",
"src/runtime/runtime-symbol.cc",
"src/runtime/runtime-test.cc",
@ -1780,14 +1799,16 @@ v8_source_set("v8_base") {
"src/vm-state-inl.h",
"src/vm-state.h",
"src/wasm/decoder.h",
"src/wasm/function-body-decoder-impl.h",
"src/wasm/function-body-decoder.cc",
"src/wasm/function-body-decoder.h",
"src/wasm/leb-helper.h",
"src/wasm/managed.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
"src/wasm/signature-map.cc",
"src/wasm/signature-map.h",
"src/wasm/wasm-code-specialization.cc",
"src/wasm/wasm-code-specialization.h",
"src/wasm/wasm-debug.cc",
"src/wasm/wasm-external-refs.cc",
"src/wasm/wasm-external-refs.h",
@ -2532,6 +2553,21 @@ group("gn_all") {
}
}
group("v8_clusterfuzz") {
deps = [
":d8",
]
if (v8_multi_arch_build) {
deps += [
":d8(//build/toolchain/linux:clang_x64)",
":d8(//build/toolchain/linux:clang_x64_v8_arm64)",
":d8(//build/toolchain/linux:clang_x86)",
":d8(//build/toolchain/linux:clang_x86_v8_arm)",
]
}
}
group("v8_fuzzers") {
testonly = true
deps = [
@ -2609,8 +2645,6 @@ v8_executable("d8") {
"//build/win:default_exe_manifest",
]
# TODO(jochen): Add support for vtunejit.
if (is_posix) {
sources += [ "src/d8-posix.cc" ]
} else if (is_win) {
@ -2629,6 +2663,10 @@ v8_executable("d8") {
if (v8_enable_inspector) {
defines += [ "V8_INSPECTOR_ENABLED" ]
}
if (v8_enable_vtunejit) {
deps += [ "//src/third_party/vtune:v8_vtune" ]
}
}
v8_isolate_run("d8") {
@ -3064,3 +3102,23 @@ v8_source_set("wasm_data_section_fuzzer") {
v8_fuzzer("wasm_data_section_fuzzer") {
}
v8_source_set("wasm_compile_fuzzer") {
sources = [
"test/fuzzer/wasm-compile.cc",
]
deps = [
":fuzzer_support",
":wasm_module_runner",
":wasm_test_signatures",
]
configs = [
":external_config",
":internal_config_base",
]
}
v8_fuzzer("wasm_compile_fuzzer") {
}

deps/v8/ChangeLog (1531 changed lines; diff suppressed because it is too large)

deps/v8/DEPS (21 changed lines)

@ -8,23 +8,23 @@ vars = {
deps = {
"v8/build":
Var("chromium_url") + "/chromium/src/build.git" + "@" + "f55127ddc3632dbd6fef285c71ab4cb103e08f0c",
Var("chromium_url") + "/chromium/src/build.git" + "@" + "c7c2db69cd571523ce728c4d3dceedbd1896b519",
"v8/tools/gyp":
Var("chromium_url") + "/external/gyp.git" + "@" + "e7079f0e0e14108ab0dba58728ff219637458563",
"v8/third_party/icu":
Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "9cd2828740572ba6f694b9365236a8356fd06147",
Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "450be73c9ee8ae29d43d4fdc82febb2a5f62bfb5",
"v8/third_party/instrumented_libraries":
Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "5b6f777da671be977f56f0e8fc3469a3ccbb4474",
"v8/buildtools":
Var("chromium_url") + "/chromium/buildtools.git" + "@" + "cb12d6e8641f0c9b0fbbfa4bf17c55c6c0d3c38f",
Var("chromium_url") + "/chromium/buildtools.git" + "@" + "94cdccbebc7a634c27145a3d84089e85fbb42e69",
"v8/base/trace_event/common":
Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "06294c8a4a6f744ef284cd63cfe54dbf61eea290",
"v8/third_party/jinja2":
Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "b61a2c009a579593a259c1b300e0ad02bf48fd78",
Var("chromium_url") + "/chromium/src/third_party/jinja2.git" + "@" + "d34383206fa42d52faa10bb9931d6d538f3a57e0",
"v8/third_party/markupsafe":
Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "484a5661041cac13bfc688a26ec5434b05d18961",
Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "8f45f5cfa0009d2a70589bcda0349b8cb2b72783",
"v8/tools/swarming_client":
Var('chromium_url') + '/external/swarming.client.git' + '@' + "ebc8dab6f8b8d79ec221c94de39a921145abd404",
Var('chromium_url') + '/external/swarming.client.git' + '@' + "11e31afa5d330756ff87aa12064bb5d032896cb5",
"v8/testing/gtest":
Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
"v8/testing/gmock":
@ -33,13 +33,14 @@ deps = {
Var("chromium_url") + "/v8/deps/third_party/benchmarks.git" + "@" + "05d7188267b4560491ff9155c5ee13e207ecd65f",
"v8/test/mozilla/data":
Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
"v8/test/simdjs/data": Var("chromium_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556",
"v8/test/test262/data":
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "6a0f1189eb00d38ef9760cb65cbc41c066876cde",
Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "a72ee6d91275aa6524e84a9b7070103411ef2689",
"v8/test/test262/harness":
Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
"v8/tools/clang":
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "f7ce1a5678e5addc015aed5f1e7734bbd2caac7c",
Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "9913fb19b687b0c858f697efd7bd2468d789a3d5",
"v8/test/wasm-js":
Var("chromium_url") + "/external/github.com/WebAssembly/spec.git" + "@" + "b8b919e4a0d52db4d3d762e731e615bc3a38b3b2",
}
deps_os = {
@ -47,7 +48,7 @@ deps_os = {
"v8/third_party/android_tools":
Var("chromium_url") + "/android_tools.git" + "@" + "b43a6a289a7588b1769814f04dd6c7d7176974cc",
"v8/third_party/catapult":
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "143ba4ddeb05e6165fb8413c5f3f47d342922d24",
Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "246a39a82c2213d913a96fff020a263838dc76e6",
},
"win": {
"v8/third_party/cygwin":

deps/v8/Makefile (4 changed lines)

@ -51,6 +51,10 @@ endif
ifeq ($(objectprint), on)
GYPFLAGS += -Dv8_object_print=1
endif
# verifycsa=on
ifeq ($(verifycsa), on)
GYPFLAGS += -Dv8_enable_verify_csa=1
endif
# verifyheap=on
ifeq ($(verifyheap), on)
GYPFLAGS += -Dv8_enable_verify_heap=1

deps/v8/PRESUBMIT.py

@ -79,7 +79,7 @@ def _V8PresubmitChecks(input_api, output_api):
"Copyright header, trailing whitespaces and two empty lines " \
"between declarations check failed"))
if not StatusFilesProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=False)):
input_api.AffectedFiles(include_deletes=True)):
results.append(output_api.PresubmitError("Status file check failed"))
results.extend(input_api.canned_checks.CheckAuthorizedAuthor(
input_api, output_api))

deps/v8/build_overrides/v8.gni (deleted)

@ -1,25 +0,0 @@
# Copyright 2015 The V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import("//build/config/v8_target_cpu.gni")
if (((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
v8_current_cpu == "x87") && (is_linux || is_mac)) ||
(v8_current_cpu == "ppc64" && is_linux)) {
v8_enable_gdbjit_default = true
}
v8_imminent_deprecation_warnings_default = true
# Add simple extras solely for the purpose of the cctests.
v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
v8_experimental_extra_library_files =
[ "//test/cctest/test-experimental-extra.js" ]
v8_enable_inspector_override = true
declare_args() {
# Use static libraries instead of source_sets.
v8_static_library = false
}

deps/v8/gni/isolate.gni

@ -61,6 +61,11 @@ template("v8_isolate_run") {
} else {
asan = "0"
}
if (is_lsan) {
lsan = "1"
} else {
lsan = "0"
}
if (is_msan) {
msan = "1"
} else {
@ -158,6 +163,8 @@ template("v8_isolate_run") {
"--config-variable",
"is_gn=1",
"--config-variable",
"lsan=$lsan",
"--config-variable",
"msan=$msan",
"--config-variable",
"tsan=$tsan",

deps/v8/gni/v8.gni (18 changed lines)

@ -4,12 +4,14 @@
import("//build/config/sanitizers/sanitizers.gni")
import("//build/config/v8_target_cpu.gni")
import("//build_overrides/v8.gni")
declare_args() {
# Includes files needed for correctness fuzzing.
v8_correctness_fuzzer = false
# Adds additional compile target for building multiple architectures at once.
v8_multi_arch_build = false
# Indicate if valgrind was fetched as a custom deps to make it available on
# swarming.
v8_has_valgrind = false
@ -36,7 +38,10 @@ declare_args() {
v8_enable_i18n_support = true
# Enable inspector. See include/v8-inspector.h.
v8_enable_inspector = v8_enable_inspector_override
v8_enable_inspector = true
# Use static libraries instead of source_sets.
v8_static_library = false
}
if (v8_use_external_startup_data == "") {
@ -107,6 +112,15 @@ template("v8_source_set") {
}
}
template("v8_header_set") {
source_set(target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
}
}
template("v8_executable") {
executable(target_name) {
forward_variables_from(invoker,

deps/v8/gypfiles/all.gyp

@ -47,7 +47,6 @@
'../test/optimize_for_size.gyp:*',
'../test/perf.gyp:*',
'../test/preparser/preparser.gyp:*',
'../test/simdjs/simdjs.gyp:*',
'../test/test262/test262.gyp:*',
'../test/webkit/webkit.gyp:*',
'../tools/check-static-initializers.gyp:*',

deps/v8/gypfiles/features.gypi

@ -33,6 +33,8 @@
'v8_enable_gdbjit%': 0,
'v8_enable_verify_csa%': 0,
'v8_object_print%': 0,
'v8_enable_verify_heap%': 0,
@ -78,6 +80,9 @@
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
['v8_enable_verify_csa==1', {
'defines': ['ENABLE_VERIFY_CSA',],
}],
['v8_object_print==1', {
'defines': ['OBJECT_PRINT',],
}],

deps/v8/gypfiles/isolate.gypi

@ -75,6 +75,7 @@
'--config-variable', 'has_valgrind=<(has_valgrind)',
'--config-variable', 'icu_use_data_file_flag=<(icu_use_data_file_flag)',
'--config-variable', 'is_gn=0',
'--config-variable', 'lsan=<(lsan)',
'--config-variable', 'msan=<(msan)',
'--config-variable', 'tsan=<(tsan)',
'--config-variable', 'coverage=<(coverage)',

deps/v8/gypfiles/toolchain.gypi

@ -315,6 +315,8 @@
'defines': [
'V8_TARGET_ARCH_S390_LE_SIM',
],
}, {
'cflags': [ '-march=z196' ],
}],
],
}], # s390

deps/v8/include/v8-debug.h

@ -116,9 +116,7 @@ class V8_EXPORT Debug {
virtual Local<Value> GetCallbackData() const = 0;
/**
* Client data passed to DebugBreakForCommand function. The
* debugger takes ownership of the data and will delete it even if
* there is no message handler.
* This is now a dummy that returns nullptr.
*/
virtual ClientData* GetClientData() const = 0;
@ -132,23 +130,18 @@ class V8_EXPORT Debug {
*
* \param event_details object providing information about the debug event
*
* A EventCallback2 does not take possession of the event data,
* A EventCallback does not take possession of the event data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*EventCallback)(const EventDetails& event_details);
/**
* Debug message callback function.
*
* \param message the debug message handler message object
*
* A MessageHandler does not take possession of the message data,
* and must not rely on the data persisting after the handler returns.
* This is now a no-op.
*/
typedef void (*MessageHandler)(const Message& message);
/**
* Callback function for the host to ensure debug messages are processed.
* This is now a no-op.
*/
typedef void (*DebugMessageDispatchHandler)();
@ -167,11 +160,12 @@ class V8_EXPORT Debug {
V8_DEPRECATED("No longer supported",
static bool CheckDebugBreak(Isolate* isolate));
// Message based interface. The message protocol is JSON.
// This is now a no-op.
V8_DEPRECATED("No longer supported",
static void SetMessageHandler(Isolate* isolate,
MessageHandler handler));
// This is now a no-op.
V8_DEPRECATED("No longer supported",
static void SendCommand(Isolate* isolate,
const uint16_t* command, int length,
@ -200,44 +194,7 @@ class V8_EXPORT Debug {
v8::Local<v8::Function> fun,
Local<Value> data = Local<Value>());
/**
* Returns a mirror object for the given object.
*/
V8_DEPRECATED("No longer supported",
static MaybeLocal<Value> GetMirror(Local<Context> context,
v8::Local<v8::Value> obj));
/**
* Makes V8 process all pending debug messages.
*
* From V8 point of view all debug messages come asynchronously (e.g. from
* remote debugger) but they all must be handled synchronously: V8 cannot
* do 2 things at one time so normal script execution must be interrupted
* for a while.
*
* Generally when message arrives V8 may be in one of 3 states:
* 1. V8 is running script; V8 will automatically interrupt and process all
* pending messages;
* 2. V8 is suspended on debug breakpoint; in this state V8 is dedicated
* to reading and processing debug messages;
* 3. V8 is not running at all or has called some long-working C++ function;
* by default it means that processing of all debug messages will be deferred
* until V8 gets control again; however, embedding application may improve
* this by manually calling this method.
*
* Technically this method in many senses is equivalent to executing empty
* script:
* 1. It does nothing except for processing all pending debug messages.
* 2. It should be invoked with the same precautions and from the same context
* as V8 script would be invoked from, because:
* a. with "evaluate" command it can do whatever normal script can do,
* including all native calls;
* b. no other thread should call V8 while this method is running
* (v8::Locker may be used here).
*
* "Evaluate" debug command behavior currently is not specified in scope
* of this method.
*/
// This is now a no-op.
V8_DEPRECATED("No longer supported",
static void ProcessDebugMessages(Isolate* isolate));

deps/v8/include/v8-inspector.h

@ -85,6 +85,8 @@ class V8_EXPORT V8ContextInfo {
StringView auxData;
bool hasMemoryOnConsole;
static int executionContextId(v8::Local<v8::Context> context);
private:
// Disallow copying and allocating this one.
enum NotNullTagEnum { NotNullLiteral };
@ -156,8 +158,6 @@ class V8_EXPORT V8InspectorSession {
virtual void releaseObjectGroup(const StringView&) = 0;
};
enum class V8ConsoleAPIType { kClear, kDebug, kLog, kInfo, kWarning, kError };
class V8_EXPORT V8InspectorClient {
public:
virtual ~V8InspectorClient() {}
@ -189,7 +189,8 @@ class V8_EXPORT V8InspectorClient {
virtual void installAdditionalCommandLineAPI(v8::Local<v8::Context>,
v8::Local<v8::Object>) {}
virtual void consoleAPIMessage(int contextGroupId, V8ConsoleAPIType,
virtual void consoleAPIMessage(int contextGroupId,
v8::Isolate::MessageErrorLevel level,
const StringView& message,
const StringView& url, unsigned lineNumber,
unsigned columnNumber, V8StackTrace*) {}
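
The consoleAPIMessage override now takes a v8::Isolate::MessageErrorLevel where the removed V8ConsoleAPIType used to be. A minimal sketch of an embedder client against the new signature (the logging body is left as a placeholder):

class ExampleInspectorClient : public v8_inspector::V8InspectorClient {
 public:
  void consoleAPIMessage(int contextGroupId,
                         v8::Isolate::MessageErrorLevel level,
                         const v8_inspector::StringView& message,
                         const v8_inspector::StringView& url,
                         unsigned lineNumber, unsigned columnNumber,
                         v8_inspector::V8StackTrace*) override {
    // Forward `message` (plus url/lineNumber/columnNumber) to the embedder's
    // logging facility here, switching on `level`.
  }
};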
@ -201,6 +202,7 @@ class V8_EXPORT V8InspectorClient {
virtual void consoleTime(const StringView& title) {}
virtual void consoleTimeEnd(const StringView& title) {}
virtual void consoleTimeStamp(const StringView& title) {}
virtual void consoleClear(int contextGroupId) {}
virtual double currentTimeMS() { return 0; }
typedef void (*TimerCallback)(void*);
virtual void startRepeatingTimer(double, TimerCallback, void* data) {}

deps/v8/include/v8-profiler.h

@ -5,6 +5,7 @@
#ifndef V8_V8_PROFILER_H_
#define V8_V8_PROFILER_H_
#include <unordered_set>
#include <vector>
#include "v8.h" // NOLINT(build/include)
@ -392,8 +393,7 @@ class V8_EXPORT HeapGraphNode {
// snapshot items together.
kConsString = 10, // Concatenated string. A pair of pointers to strings.
kSlicedString = 11, // Sliced string. A fragment of another string.
kSymbol = 12, // A Symbol (ES6).
kSimdValue = 13 // A SIMD value stored in the heap (Proposed ES7).
kSymbol = 12 // A Symbol (ES6).
};
/** Returns node type (see HeapGraphNode::Type). */
@ -630,6 +630,24 @@ class V8_EXPORT HeapProfiler {
kSamplingForceGC = 1 << 0,
};
typedef std::unordered_set<const v8::PersistentBase<v8::Value>*>
RetainerChildren;
typedef std::vector<std::pair<v8::RetainedObjectInfo*, RetainerChildren>>
RetainerGroups;
typedef std::vector<std::pair<const v8::PersistentBase<v8::Value>*,
const v8::PersistentBase<v8::Value>*>>
RetainerEdges;
struct RetainerInfos {
RetainerGroups groups;
RetainerEdges edges;
};
/**
* Callback function invoked to retrieve all RetainerInfos from the embedder.
*/
typedef RetainerInfos (*GetRetainerInfosCallback)(v8::Isolate* isolate);
/**
* Callback function invoked for obtaining RetainedObjectInfo for
* the given JavaScript wrapper object. It is prohibited to enter V8
@ -782,6 +800,8 @@ class V8_EXPORT HeapProfiler {
uint16_t class_id,
WrapperInfoCallback callback);
void SetGetRetainerInfosCallback(GetRetainerInfosCallback callback);
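
A minimal sketch of the new retainer-infos hook, assuming an existing isolate; a real embedder would populate `groups` and `edges` from its own wrapper bookkeeping instead of returning the empty default:

v8::HeapProfiler::RetainerInfos GetRetainerInfos(v8::Isolate* isolate) {
  // Report no embedder-side retainer groups or edges.
  return v8::HeapProfiler::RetainerInfos();
}

// Registration, e.g. during isolate setup:
// isolate->GetHeapProfiler()->SetGetRetainerInfosCallback(GetRetainerInfos);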
/**
* Default value of persistent handle class ID. Must not be used to
* define a class. Can be used to reset a class of a persistent

deps/v8/include/v8-util.h

@ -6,6 +6,7 @@
#define V8_UTIL_H_
#include "v8.h" // NOLINT(build/include)
#include <assert.h>
#include <map>
#include <vector>
@ -210,7 +211,7 @@ class PersistentValueMapBase {
* key.
*/
void RegisterExternallyReferencedObject(K& key) {
DCHECK(Contains(key));
assert(Contains(key));
V8::RegisterExternallyReferencedObject(
reinterpret_cast<internal::Object**>(FromVal(Traits::Get(&impl_, key))),
reinterpret_cast<internal::Isolate*>(GetIsolate()));

deps/v8/include/v8-version.h

@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 5
#define V8_MINOR_VERSION 7
#define V8_BUILD_NUMBER 492
#define V8_PATCH_LEVEL 69
#define V8_MINOR_VERSION 8
#define V8_BUILD_NUMBER 283
#define V8_PATCH_LEVEL 38
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

deps/v8/include/v8.h (91 changed lines)

@ -962,20 +962,31 @@ class V8_EXPORT Data {
class ScriptOriginOptions {
public:
V8_INLINE ScriptOriginOptions(bool is_shared_cross_origin = false,
bool is_opaque = false, bool is_wasm = false)
bool is_opaque = false, bool is_wasm = false,
bool is_module = false)
: flags_((is_shared_cross_origin ? kIsSharedCrossOrigin : 0) |
(is_wasm ? kIsWasm : 0) | (is_opaque ? kIsOpaque : 0)) {}
(is_wasm ? kIsWasm : 0) | (is_opaque ? kIsOpaque : 0) |
(is_module ? kIsModule : 0)) {}
V8_INLINE ScriptOriginOptions(int flags)
: flags_(flags & (kIsSharedCrossOrigin | kIsOpaque | kIsWasm)) {}
: flags_(flags &
(kIsSharedCrossOrigin | kIsOpaque | kIsWasm | kIsModule)) {}
bool IsSharedCrossOrigin() const {
return (flags_ & kIsSharedCrossOrigin) != 0;
}
bool IsOpaque() const { return (flags_ & kIsOpaque) != 0; }
bool IsWasm() const { return (flags_ & kIsWasm) != 0; }
bool IsModule() const { return (flags_ & kIsModule) != 0; }
int Flags() const { return flags_; }
private:
enum { kIsSharedCrossOrigin = 1, kIsOpaque = 1 << 1, kIsWasm = 1 << 2 };
enum {
kIsSharedCrossOrigin = 1,
kIsOpaque = 1 << 1,
kIsWasm = 1 << 2,
kIsModule = 1 << 3
};
const int flags_;
};
@ -992,7 +1003,8 @@ class ScriptOrigin {
Local<Integer> script_id = Local<Integer>(),
Local<Value> source_map_url = Local<Value>(),
Local<Boolean> resource_is_opaque = Local<Boolean>(),
Local<Boolean> is_wasm = Local<Boolean>());
Local<Boolean> is_wasm = Local<Boolean>(),
Local<Boolean> is_module = Local<Boolean>());
V8_INLINE Local<Value> ResourceName() const;
V8_INLINE Local<Integer> ResourceLineOffset() const;
@ -1183,6 +1195,8 @@ class V8_EXPORT ScriptCompiler {
// alive.
V8_INLINE const CachedData* GetCachedData() const;
V8_INLINE const ScriptOriginOptions& GetResourceOptions() const;
// Prevent copying.
Source(const Source&) = delete;
Source& operator=(const Source&) = delete;
@ -1425,7 +1439,7 @@ class V8_EXPORT ScriptCompiler {
private:
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundInternal(
Isolate* isolate, Source* source, CompileOptions options, bool is_module);
Isolate* isolate, Source* source, CompileOptions options);
};
@ -1771,7 +1785,7 @@ class V8_EXPORT ValueSerializer {
/*
* Marks an ArrayBuffer as havings its contents transferred out of band.
* Pass the corresponding JSArrayBuffer in the deserializing context to
* Pass the corresponding ArrayBuffer in the deserializing context to
* ValueDeserializer::TransferArrayBuffer.
*/
void TransferArrayBuffer(uint32_t transfer_id,
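
A hedged sketch of the out-of-band transfer this comment describes, with `sender_buffer` and `receiver_buffer` standing in for embedder-provided v8::Local<v8::ArrayBuffer> handles; both sides must agree on the transfer id:

// Sending side: mark the buffer as transferred out of band.
v8::ValueSerializer serializer(isolate);
serializer.TransferArrayBuffer(/*transfer_id=*/0, sender_buffer);
// ... WriteValue(context, value), then ship the wire bytes and the buffer
// contents through the embedder's own channel ...

// Receiving side: provide the corresponding ArrayBuffer under the same id.
v8::ValueDeserializer deserializer(isolate, wire_data, wire_size);
deserializer.TransferArrayBuffer(/*transfer_id=*/0, receiver_buffer);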
@ -2158,12 +2172,6 @@ class V8_EXPORT Value : public Data {
*/
bool IsFloat64Array() const;
/**
* Returns true if this value is a SIMD Float32x4.
* This is an experimental feature.
*/
bool IsFloat32x4() const;
/**
* Returns true if this value is a DataView.
*/
@ -2326,7 +2334,7 @@ class V8_EXPORT String : public Name {
enum Encoding {
UNKNOWN_ENCODING = 0x1,
TWO_BYTE_ENCODING = 0x0,
ONE_BYTE_ENCODING = 0x4
ONE_BYTE_ENCODING = 0x8
};
/**
* Returns the number of characters in this string.
@ -2442,6 +2450,7 @@ class V8_EXPORT String : public Name {
private:
friend class internal::Heap;
friend class v8::String;
};
/**
@ -4613,8 +4622,11 @@ class V8_EXPORT External : public Value {
static void CheckCast(v8::Value* obj);
};
#define V8_INTRINSICS_LIST(F) F(ArrayProto_values, array_values_iterator)
#define V8_INTRINSICS_LIST(F) \
F(ArrayProto_entries, array_entries_iterator) \
F(ArrayProto_forEach, array_for_each_iterator) \
F(ArrayProto_keys, array_keys_iterator) \
F(ArrayProto_values, array_values_iterator)
enum Intrinsic {
#define V8_DECL_INTRINSIC(name, iname) k##name,
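
With the list expanded beyond ArrayProto_values, a template can expose the other array-prototype iterators by intrinsic. A sketch assuming `isolate` and an ObjectTemplate `templ` (the enum constant follows the k##name expansion above):

templ->SetIntrinsicDataProperty(
    v8::String::NewFromUtf8(isolate, "keys", v8::NewStringType::kInternalized)
        .ToLocalChecked(),
    v8::kArrayProto_keys);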
@ -6360,7 +6372,8 @@ class V8_EXPORT Isolate {
create_histogram_callback(nullptr),
add_histogram_sample_callback(nullptr),
array_buffer_allocator(nullptr),
external_references(nullptr) {}
external_references(nullptr),
allow_atomics_wait(true) {}
/**
* The optional entry_hook allows the host application to provide the
@ -6416,6 +6429,12 @@ class V8_EXPORT Isolate {
* entire lifetime of the isolate.
*/
intptr_t* external_references;
/**
* Whether calling Atomics.wait (a function that may block) is allowed in
* this isolate.
*/
bool allow_atomics_wait;
};
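
A minimal sketch of the new knob, assuming the usual isolate bootstrap; with allow_atomics_wait cleared, Atomics.wait in this isolate is expected to throw instead of blocking:

v8::Isolate::CreateParams params;
params.array_buffer_allocator =
    v8::ArrayBuffer::Allocator::NewDefaultAllocator();
params.allow_atomics_wait = false;  // forbid the blocking Atomics.wait
v8::Isolate* isolate = v8::Isolate::New(params);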
@ -6789,6 +6808,14 @@ class V8_EXPORT Isolate {
/** Returns the last context entered through V8's C++ API. */
Local<Context> GetEnteredContext();
/**
* Returns either the last context entered through V8's C++ API, or the
* context of the currently running microtask while processing microtasks.
* If a context is entered while executing a microtask, that context is
* returned.
*/
Local<Context> GetEnteredOrMicrotaskContext();
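
A sketch of where the distinction matters, assuming microtasks enqueued through the C++ API: inside the callback, GetEnteredContext() still reflects the last explicitly entered context, while the new accessor yields the running microtask's context:

void OnMicrotask(void* data) {
  v8::Isolate* isolate = static_cast<v8::Isolate*>(data);
  // Resolves to the context of the currently running microtask.
  v8::Local<v8::Context> context = isolate->GetEnteredOrMicrotaskContext();
  // ... look up per-context embedder state from `context` ...
}

// isolate->EnqueueMicrotask(OnMicrotask, isolate);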
/**
* Schedules an exception to be thrown when returning to JavaScript. When an
* exception has been scheduled it is illegal to invoke any JavaScript
@ -6809,7 +6836,7 @@ class V8_EXPORT Isolate {
* for partially dependent handles only.
*/
template <typename T>
V8_DEPRECATE_SOON("Use EmbedderHeapTracer",
V8_DEPRECATED("Use EmbedderHeapTracer",
void SetObjectGroupId(const Persistent<T>& object,
UniqueId id));
@ -6821,7 +6848,7 @@ class V8_EXPORT Isolate {
* callback function.
*/
template <typename T>
V8_DEPRECATE_SOON("Use EmbedderHeapTracer",
V8_DEPRECATED("Use EmbedderHeapTracer",
void SetReferenceFromGroup(UniqueId id,
const Persistent<T>& child));
@ -6832,7 +6859,7 @@ class V8_EXPORT Isolate {
* is intended to be used in the before-garbage-collection callback function.
*/
template <typename T, typename S>
V8_DEPRECATE_SOON("Use EmbedderHeapTracer",
V8_DEPRECATED("Use EmbedderHeapTracer",
void SetReference(const Persistent<T>& parent,
const Persistent<S>& child));
@ -8476,10 +8503,10 @@ class Internals {
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
static const int kContextEmbedderDataIndex = 5;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kFullStringRepresentationMask = 0x0f;
static const int kStringEncodingMask = 0x8;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kExternalOneByteRepresentationTag = 0x06;
static const int kExternalOneByteRepresentationTag = 0x0a;
static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
static const int kExternalMemoryOffset = 4 * kApiPointerSize;
@ -8504,11 +8531,11 @@ class Internals {
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsActiveShift = 4;
static const int kJSApiObjectType = 0xbb;
static const int kJSObjectType = 0xbc;
static const int kJSApiObjectType = 0xb9;
static const int kJSObjectType = 0xba;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
static const int kOddballType = 0x82;
static const int kForeignType = 0x86;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
@ -9026,14 +9053,15 @@ ScriptOrigin::ScriptOrigin(Local<Value> resource_name,
Local<Integer> script_id,
Local<Value> source_map_url,
Local<Boolean> resource_is_opaque,
Local<Boolean> is_wasm)
Local<Boolean> is_wasm, Local<Boolean> is_module)
: resource_name_(resource_name),
resource_line_offset_(resource_line_offset),
resource_column_offset_(resource_column_offset),
options_(!resource_is_shared_cross_origin.IsEmpty() &&
resource_is_shared_cross_origin->IsTrue(),
!resource_is_opaque.IsEmpty() && resource_is_opaque->IsTrue(),
!is_wasm.IsEmpty() && is_wasm->IsTrue()),
!is_wasm.IsEmpty() && is_wasm->IsTrue(),
!is_module.IsEmpty() && is_module->IsTrue()),
script_id_(script_id),
source_map_url_(source_map_url) {}
@ -9082,13 +9110,16 @@ const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData()
return cached_data;
}
const ScriptOriginOptions& ScriptCompiler::Source::GetResourceOptions() const {
return resource_options;
}
Local<Boolean> Boolean::New(Isolate* isolate, bool value) {
return value ? True(isolate) : False(isolate);
}
void Template::Set(Isolate* isolate, const char* name, Local<Data> value) {
Set(String::NewFromUtf8(isolate, name, NewStringType::kNormal)
Set(String::NewFromUtf8(isolate, name, NewStringType::kInternalized)
.ToLocalChecked(),
value);
}

deps/v8/infra/config/cq.cfg

@ -4,10 +4,11 @@
version: 1
cq_name: "v8"
cq_status_url: "https://chromium-cq-status.appspot.com"
git_repo_url: "https://chromium.googlesource.com/v8/v8"
commit_burst_delay: 60
max_commit_burst: 1
target_ref: "refs/pending/heads/master"
gerrit {}
rietveld {
url: "https://codereview.chromium.org"
}
@ -18,6 +19,11 @@ verifiers {
dry_run_access_list: "project-v8-tryjob-access"
}
gerrit_cq_ability {
committer_list: "project-v8-committers"
dry_run_access_list: "project-v8-tryjob-access"
}
tree_status {
tree_status_url: "https://v8-status.appspot.com"
}
@ -46,6 +52,11 @@ verifiers {
name: "v8_linux64_rel_ng_triggered"
triggered_by: "v8_linux64_rel_ng"
}
builders { name: "v8_linux64_verify_csa_rel_ng" }
builders {
name: "v8_linux64_verify_csa_rel_ng_triggered"
triggered_by: "v8_linux64_verify_csa_rel_ng"
}
builders { name: "v8_linux_arm64_rel_ng" }
builders {
name: "v8_linux_arm64_rel_ng_triggered"
@ -75,6 +86,11 @@ verifiers {
name: "v8_linux_rel_ng_triggered"
triggered_by: "v8_linux_rel_ng"
}
builders { name: "v8_linux_verify_csa_rel_ng" }
builders {
name: "v8_linux_verify_csa_rel_ng_triggered"
triggered_by: "v8_linux_verify_csa_rel_ng"
}
builders { name: "v8_mac_rel_ng" }
builders {
name: "v8_mac_rel_ng_triggered"

deps/v8/infra/mb/mb_config.pyl

@ -18,6 +18,12 @@
'ia32.debug': 'default_debug_x86',
'ia32.optdebug': 'default_optdebug_x86',
'ia32.release': 'default_release_x86',
'mipsel.debug': 'default_debug_mipsel',
'mipsel.optdebug': 'default_optdebug_mipsel',
'mipsel.release': 'default_release_mipsel',
'mips64el.debug': 'default_debug_mips64el',
'mips64el.optdebug': 'default_optdebug_mips64el',
'mips64el.release': 'default_release_mips64el',
'x64.debug': 'default_debug_x64',
'x64.optdebug': 'default_optdebug_x64',
'x64.release': 'default_release_x64',
@ -39,12 +45,14 @@
'V8 Linux - nosnap debug builder': 'gn_debug_x86_no_snap',
'V8 Linux - shared': 'gn_release_x86_shared_verify_heap',
'V8 Linux - noi18n - debug': 'gn_debug_x86_no_i18n',
'V8 Linux - verify csa': 'gn_release_x86_verify_csa',
# Linux64.
'V8 Linux64 - builder': 'gn_release_x64_valgrind',
'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind',
'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom',
'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
'V8 Linux64 - gyp': 'gyp_release_x64',
'V8 Linux64 - verify csa': 'gn_release_x64_verify_csa',
# Windows.
'V8 Win32 - builder': 'gn_release_x86_minimal_symbols',
'V8 Win32 - debug builder': 'gn_debug_x86_minimal_symbols',
@ -147,6 +155,7 @@
},
'tryserver.v8': {
'v8_linux_rel_ng': 'gn_release_x86_gcmole_trybot',
'v8_linux_verify_csa_rel_ng': 'gn_release_x86_verify_csa',
'v8_linux_avx2_dbg': 'gn_debug_x86_trybot',
'v8_linux_nodcheck_rel_ng': 'gn_release_x86_minimal_symbols',
'v8_linux_dbg_ng': 'gn_debug_x86_trybot',
@ -157,6 +166,7 @@
'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
'v8_linux64_rel_ng': 'gn_release_x64_valgrind_trybot',
'v8_linux64_verify_csa_rel_ng': 'gn_release_x64_verify_csa',
'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
'v8_linux64_avx2_rel_ng': 'gn_release_x64_trybot',
'v8_linux64_avx2_dbg': 'gn_debug_x64_trybot',
@ -210,6 +220,20 @@
'gn', 'debug', 'simulate_arm64', 'v8_enable_slow_dchecks'],
'default_release_arm64': [
'gn', 'release', 'simulate_arm64'],
'default_debug_mipsel': [
'gn', 'debug', 'simulate_mipsel', 'v8_enable_slow_dchecks',
'v8_full_debug'],
'default_optdebug_mipsel': [
'gn', 'debug', 'simulate_mipsel', 'v8_enable_slow_dchecks'],
'default_release_mipsel': [
'gn', 'release', 'simulate_mipsel'],
'default_debug_mips64el': [
'gn', 'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks',
'v8_full_debug'],
'default_optdebug_mips64el': [
'gn', 'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks'],
'default_release_mips64el': [
'gn', 'release', 'simulate_mips64el'],
'default_debug_x64': [
'gn', 'debug', 'x64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_x64': [
@ -302,6 +326,9 @@
'gn', 'release_bot', 'x64', 'swarming', 'valgrind'],
'gn_release_x64_valgrind_trybot': [
'gn', 'release_trybot', 'x64', 'swarming', 'valgrind'],
'gn_release_x64_verify_csa': [
'gn', 'release_bot', 'x64', 'swarming', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
# GN debug configs for x64.
'gn_debug_x64': [
@ -359,6 +386,9 @@
'gn', 'release', 'x86', 'goma', 'shared', 'swarming', 'v8_verify_heap'],
'gn_release_x86_trybot': [
'gn', 'release_trybot', 'x86', 'swarming'],
'gn_release_x86_verify_csa': [
'gn', 'release_bot', 'x86', 'swarming', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
# Gyp debug configs for simulators.
'gyp_debug_simulate_x87': [
@ -628,7 +658,7 @@
},
'v8_correctness_fuzzer': {
'gn_args': 'v8_correctness_fuzzer=true',
'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
},
'v8_disable_inspector': {
@ -694,6 +724,10 @@
'gyp_defines': 'v8_enable_verify_heap=1',
},
'v8_verify_csa': {
'gn_args': 'v8_enable_verify_csa=true',
},
'x64': {
'gn_args': 'target_cpu="x64"',
'gyp_defines': 'target_arch=x64',

deps/v8/src/accessors.h

@ -8,7 +8,6 @@
#include "include/v8.h"
#include "src/allocation.h"
#include "src/globals.h"
#include "src/handles.h"
#include "src/property-details.h"
namespace v8 {
@ -16,6 +15,8 @@ namespace internal {
// Forward declarations.
class AccessorInfo;
template <typename T>
class Handle;
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.

deps/v8/src/api-experimental.h

@ -5,11 +5,11 @@
#ifndef V8_API_EXPERIMENTAL_H_
#define V8_API_EXPERIMENTAL_H_
#include "src/handles.h"
namespace v8 {
namespace internal {
class Code;
template <typename T>
class MaybeHandle;
} // internal;
namespace experimental {
class FastAccessorBuilder;

deps/v8/src/api-natives.cc

@ -538,8 +538,6 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
JSFunction::SetInitialMap(object_function, object_map,
isolate->factory()->null_value());
object_map->set_is_access_check_needed(true);
object_map->set_is_callable();
object_map->set_is_constructor(true);
Handle<JSObject> object = isolate->factory()->NewJSObject(object_function);
JSObject::ForceSetPrototype(object, isolate->factory()->null_value());

deps/v8/src/api.cc (511 changed lines)

@ -35,6 +35,7 @@
#include "src/contexts.h"
#include "src/conversions-inl.h"
#include "src/counters.h"
#include "src/debug/debug-coverage.h"
#include "src/debug/debug.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
@ -47,6 +48,7 @@
#include "src/json-parser.h"
#include "src/json-stringifier.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
@ -283,7 +285,8 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
v8::Integer::New(v8_isolate, script->id()),
Utils::ToLocal(source_map_url),
v8::Boolean::New(v8_isolate, options.IsOpaque()),
v8::Boolean::New(v8_isolate, script->type() == i::Script::TYPE_WASM));
v8::Boolean::New(v8_isolate, script->type() == i::Script::TYPE_WASM),
v8::Boolean::New(v8_isolate, options.IsModule()));
return origin;
}
@ -2103,8 +2106,7 @@ MaybeLocal<Value> Module::Evaluate(Local<Context> context) {
}
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
Isolate* v8_isolate, Source* source, CompileOptions options,
bool is_module) {
Isolate* v8_isolate, Source* source, CompileOptions options) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
PREPARE_FOR_EXECUTION_WITH_ISOLATE(isolate, ScriptCompiler, CompileUnbound,
UnboundScript);
@ -2149,7 +2151,7 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
result = i::Compiler::GetSharedFunctionInfoForScript(
str, name_obj, line_offset, column_offset, source->resource_options,
source_map_url, isolate->native_context(), NULL, &script_data, options,
i::NOT_NATIVES_CODE, is_module);
i::NOT_NATIVES_CODE);
has_pending_exception = result.is_null();
if (has_pending_exception && script_data != NULL) {
// This case won't happen during normal operation; we have compiled
@ -2178,15 +2180,22 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundScript(
Isolate* v8_isolate, Source* source, CompileOptions options) {
return CompileUnboundInternal(v8_isolate, source, options, false);
Utils::ApiCheck(
!source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileUnboundScript",
"v8::ScriptCompiler::CompileModule must be used to compile modules");
return CompileUnboundInternal(v8_isolate, source, options);
}
Local<UnboundScript> ScriptCompiler::CompileUnbound(Isolate* v8_isolate,
Source* source,
CompileOptions options) {
RETURN_TO_LOCAL_UNCHECKED(
CompileUnboundInternal(v8_isolate, source, options, false),
Utils::ApiCheck(
!source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileUnbound",
"v8::ScriptCompiler::CompileModule must be used to compile modules");
RETURN_TO_LOCAL_UNCHECKED(CompileUnboundInternal(v8_isolate, source, options),
UnboundScript);
}
@ -2194,8 +2203,11 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(Isolate* v8_isolate,
MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
Source* source,
CompileOptions options) {
Utils::ApiCheck(
!source->GetResourceOptions().IsModule(), "v8::ScriptCompiler::Compile",
"v8::ScriptCompiler::CompileModule must be used to compile modules");
auto isolate = context->GetIsolate();
auto maybe = CompileUnboundInternal(isolate, source, options, false);
auto maybe = CompileUnboundInternal(isolate, source, options);
Local<UnboundScript> result;
if (!maybe.ToLocal(&result)) return MaybeLocal<Script>();
v8::Context::Scope scope(context);
@ -2215,7 +2227,10 @@ MaybeLocal<Module> ScriptCompiler::CompileModule(Isolate* isolate,
Source* source) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions, true);
Utils::ApiCheck(source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileModule",
"Invalid ScriptOrigin: is_module must be true");
auto maybe = CompileUnboundInternal(isolate, source, kNoCompileOptions);
Local<UnboundScript> unbound;
if (!maybe.ToLocal(&unbound)) return MaybeLocal<Module>();
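
The new ApiChecks make the script/module split explicit at the API boundary. A hedged sketch of compiling a module under the 5.8 signatures, assuming an isolate with an entered context (the ScriptOrigin parameter order is the one declared in v8.h above):

v8::Local<v8::String> name =
    v8::String::NewFromUtf8(isolate, "m.mjs", v8::NewStringType::kNormal)
        .ToLocalChecked();
v8::ScriptOrigin origin(name,
                        v8::Local<v8::Integer>(),  // resource_line_offset
                        v8::Local<v8::Integer>(),  // resource_column_offset
                        v8::Local<v8::Boolean>(),  // resource_is_shared_cross_origin
                        v8::Local<v8::Integer>(),  // script_id
                        v8::Local<v8::Value>(),    // source_map_url
                        v8::Local<v8::Boolean>(),  // resource_is_opaque
                        v8::Local<v8::Boolean>(),  // is_wasm
                        v8::True(isolate));        // is_module
v8::Local<v8::String> text =
    v8::String::NewFromUtf8(isolate, "export default 42;",
                            v8::NewStringType::kNormal).ToLocalChecked();
v8::ScriptCompiler::Source source(text, origin);
// CompileModule now requires is_module; Compile/CompileUnbound* reject it.
v8::MaybeLocal<v8::Module> module =
    v8::ScriptCompiler::CompileModule(isolate, &source);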
@ -2271,9 +2286,14 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
Function);
TRACE_EVENT0("v8", "V8.ScriptCompiler");
i::Handle<i::String> source_string;
int parameters_end_pos = i::kNoSourcePosition;
auto factory = isolate->factory();
if (arguments_count) {
if (i::FLAG_harmony_function_tostring) {
source_string = factory->NewStringFromStaticChars("(function anonymous(");
} else {
source_string = factory->NewStringFromStaticChars("(function(");
}
for (size_t i = 0; i < arguments_count; ++i) {
IsIdentifierHelper helper;
if (!helper.Check(*Utils::OpenHandle(*arguments[i]))) {
@ -2291,13 +2311,25 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
',')).ToHandle(&source_string);
RETURN_ON_FAILED_EXECUTION(Function);
}
auto brackets = factory->NewStringFromStaticChars("){");
i::Handle<i::String> brackets;
if (i::FLAG_harmony_function_tostring) {
brackets = factory->NewStringFromStaticChars("\n) {");
parameters_end_pos = source_string->length() - 3;
} else {
brackets = factory->NewStringFromStaticChars("){");
}
has_pending_exception = !factory->NewConsString(source_string, brackets)
.ToHandle(&source_string);
RETURN_ON_FAILED_EXECUTION(Function);
} else {
if (i::FLAG_harmony_function_tostring) {
source_string =
factory->NewStringFromStaticChars("(function anonymous(\n) {");
parameters_end_pos = source_string->length() - 3;
} else {
source_string = factory->NewStringFromStaticChars("(function(){");
}
}
int scope_position = source_string->length();
has_pending_exception =
@ -2346,9 +2378,9 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
has_pending_exception =
!i::Compiler::GetFunctionFromEval(
source_string, outer_info, context, i::SLOPPY,
i::ONLY_SINGLE_FUNCTION_LITERAL, eval_scope_position, eval_position,
line_offset, column_offset - scope_position, name_obj,
source->resource_options)
i::ONLY_SINGLE_FUNCTION_LITERAL, parameters_end_pos,
eval_scope_position, eval_position, line_offset,
column_offset - scope_position, name_obj, source->resource_options)
.ToHandle(&fun);
if (has_pending_exception) {
isolate->ReportPendingMessages();
@ -2415,12 +2447,19 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
}
source->info->set_script(script);
if (source->info->literal() == nullptr) {
source->parser->ReportErrors(isolate, script);
}
source->parser->UpdateStatistics(isolate, script);
// Do the parsing tasks which need to be done on the main thread. This will
// also handle parse errors.
source->parser->Internalize(isolate, script,
source->info->literal() == nullptr);
i::DeferredHandleScope deferred_handle_scope(isolate);
{
// Internalize AST values on the main thread.
source->info->ReopenHandlesInNewHandleScope();
source->info->ast_value_factory()->Internalize(isolate);
source->parser->HandleSourceURLComments(isolate, script);
}
source->info->set_deferred_handles(deferred_handle_scope.Detach());
i::Handle<i::SharedFunctionInfo> result;
if (source->info->literal() != nullptr) {
@ -2985,7 +3024,7 @@ Local<Value> NativeWeakMap::Get(Local<Value> v8_key) {
bool NativeWeakMap::Has(Local<Value> v8_key) {
i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
i::Isolate* isolate = weak_collection->GetIsolate();
ENTER_V8(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
if (!key->IsJSReceiver() && !key->IsSymbol()) {
@ -3006,7 +3045,7 @@ bool NativeWeakMap::Has(Local<Value> v8_key) {
bool NativeWeakMap::Delete(Local<Value> v8_key) {
i::Handle<i::JSWeakMap> weak_collection = Utils::OpenHandle(this);
i::Isolate* isolate = weak_collection->GetIsolate();
ENTER_V8(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::HandleScope scope(isolate);
i::Handle<i::Object> key = Utils::OpenHandle(*v8_key);
if (!key->IsJSReceiver() && !key->IsSymbol()) {
@ -3234,9 +3273,10 @@ Maybe<bool> ValueDeserializer::ReadHeader(Local<Context> context) {
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
DCHECK(read_header);
static const uint32_t kMinimumNonLegacyVersion = 13;
if (GetWireFormatVersion() < kMinimumNonLegacyVersion &&
!private_->supports_legacy_wire_format) {
// TODO(jbroman): Today, all wire formats are "legacy". When a more supported
// format is added, compare the version of the internal serializer to the
// minimum non-legacy version number.
if (!private_->supports_legacy_wire_format) {
isolate->Throw(*isolate->factory()->NewError(
i::MessageTemplate::kDataCloneDeserializationVersionError));
has_pending_exception = true;
@ -4062,7 +4102,7 @@ bool Value::SameValue(Local<Value> that) const {
Local<String> Value::TypeOf(v8::Isolate* external_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
ENTER_V8(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
LOG_API(isolate, Value, TypeOf);
return Utils::ToLocal(i::Object::TypeOf(isolate, Utils::OpenHandle(this)));
}
@ -4465,12 +4505,11 @@ bool v8::Object::SetPrototype(Local<Value> value) {
return SetPrototype(context, value).FromMaybe(false);
}
Local<Object> v8::Object::FindInstanceInPrototypeChain(
v8::Local<FunctionTemplate> tmpl) {
auto isolate = Utils::OpenHandle(this)->GetIsolate();
i::PrototypeIterator iter(isolate, *Utils::OpenHandle(this),
i::kStartAtReceiver);
auto self = Utils::OpenHandle(this);
auto isolate = self->GetIsolate();
i::PrototypeIterator iter(isolate, *self, i::kStartAtReceiver);
auto tmpl_info = *Utils::OpenHandle(*tmpl);
while (!tmpl_info->IsTemplateFor(iter.GetCurrent<i::JSObject>())) {
iter.Advance();
@ -4678,7 +4717,7 @@ static Maybe<bool> ObjectSetAccessor(Local<Context> context, Object* self,
has_pending_exception =
!i::JSObject::SetAccessor(obj, info).ToHandle(&result);
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
if (result->IsUndefined(obj->GetIsolate())) return Nothing<bool>();
if (result->IsUndefined(obj->GetIsolate())) return Just(false);
if (fast) {
i::JSObject::MigrateSlowToFast(obj, 0, "APISetAccessor");
}
@ -4959,8 +4998,7 @@ Local<v8::Object> v8::Object::Clone() {
Local<v8::Context> v8::Object::CreationContext() {
auto self = Utils::OpenHandle(this);
auto context = handle(self->GetCreationContext());
return Utils::ToLocal(context);
return Utils::ToLocal(self->GetCreationContext());
}
@ -5804,7 +5842,7 @@ int String::WriteUtf8(char* buffer,
i::Handle<i::String> str = Utils::OpenHandle(this);
i::Isolate* isolate = str->GetIsolate();
LOG_API(isolate, String, WriteUtf8);
ENTER_V8(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (options & HINT_MANY_WRITES_EXPECTED) {
str = i::String::Flatten(str); // Flatten the string for efficiency.
}
@ -5856,7 +5894,7 @@ static inline int WriteHelper(const String* string,
int options) {
i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
LOG_API(isolate, String, Write);
ENTER_V8(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
DCHECK(start >= 0 && length >= -1);
i::Handle<i::String> str = Utils::OpenHandle(string);
if (options & String::HINT_MANY_WRITES_EXPECTED) {
@ -6542,10 +6580,13 @@ bool FunctionTemplate::HasInstance(v8::Local<v8::Value> value) {
return true;
}
if (obj->IsJSGlobalProxy()) {
// If it's a global proxy object, then test with the global object.
// If it's a global proxy, then test with the global object. Note that the
// inner global object may not necessarily be a JSGlobalObject.
i::PrototypeIterator iter(i::JSObject::cast(*obj)->map());
if (iter.IsAtEnd()) return false;
return self->IsTemplateFor(iter.GetCurrent<i::JSGlobalObject>());
// The global proxy should always have a prototype, as it is a bug to call
// this on a detached JSGlobalProxy.
DCHECK(!iter.IsAtEnd());
return self->IsTemplateFor(iter.GetCurrent<i::JSObject>());
}
return false;
}
@ -6723,11 +6764,17 @@ MaybeLocal<String> v8::String::NewExternalTwoByte(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
LOG_API(i_isolate, String, NewExternalTwoByte);
if (resource->length() > 0) {
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromTwoByte(resource)
.ToHandleChecked();
i_isolate->heap()->RegisterExternalString(*string);
return Utils::ToLocal(string);
} else {
// The resource isn't going to be used, free it immediately.
resource->Dispose();
return Utils::ToLocal(i_isolate->factory()->empty_string());
}
}
@ -6747,11 +6794,17 @@ MaybeLocal<String> v8::String::NewExternalOneByte(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
LOG_API(i_isolate, String, NewExternalOneByte);
if (resource->length() > 0) {
i::Handle<i::String> string = i_isolate->factory()
->NewExternalStringFromOneByte(resource)
.ToHandleChecked();
i_isolate->heap()->RegisterExternalString(*string);
return Utils::ToLocal(string);
} else {
// The resource isn't going to be used, free it immediately.
resource->Dispose();
return Utils::ToLocal(i_isolate->factory()->empty_string());
}
}
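
One behavioral consequence of the new zero-length branch, sketched with a hypothetical EmbedderOneByteResource (a String::ExternalOneByteStringResource subclass): V8 now disposes an empty resource inside the call, so the embedder must not touch it afterwards.

auto* resource = new EmbedderOneByteResource(/*data=*/"", /*length=*/0);
v8::MaybeLocal<v8::String> str =
    v8::String::NewExternalOneByte(isolate, resource);
// `str` is the empty string; `resource` was already Dispose()d inside the
// call, so dereferencing it here would be a use-after-free.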
@ -7081,7 +7134,7 @@ void Map::Clear() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
LOG_API(isolate, Map, Clear);
ENTER_V8(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::JSMap::Clear(self);
}
@ -7140,15 +7193,14 @@ Maybe<bool> Map::Delete(Local<Context> context, Local<Value> key) {
return Just(result->IsTrue(isolate));
}
Local<Array> Map::AsArray() const {
i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
namespace {
i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
int offset, int kind) {
i::Factory* factory = isolate->factory();
LOG_API(isolate, Map, AsArray);
ENTER_V8(isolate);
i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(obj->table()));
int length = table->NumberOfElements() * 2;
i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(table_obj));
if (offset >= table->NumberOfElements()) return factory->NewJSArray(0);
int length = (table->NumberOfElements() - offset) *
(kind == i::JSMapIterator::kKindEntries ? 2 : 1);
i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
int result_index = 0;
{
@ -7158,15 +7210,30 @@ Local<Array> Map::AsArray() const {
for (int i = 0; i < capacity; ++i) {
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
if (offset-- > 0) continue;
if (kind == i::JSMapIterator::kKindEntries ||
kind == i::JSMapIterator::kKindKeys) {
result->set(result_index++, key);
}
if (kind == i::JSMapIterator::kKindEntries ||
kind == i::JSMapIterator::kKindValues) {
result->set(result_index++, table->ValueAt(i));
}
}
}
DCHECK_EQ(result_index, result->length());
DCHECK_EQ(result_index, length);
i::Handle<i::JSArray> result_array =
factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
return Utils::ToLocal(result_array);
return factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
}
} // namespace
Local<Array> Map::AsArray() const {
i::Handle<i::JSMap> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, Map, AsArray);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
return Utils::ToLocal(
MapAsArray(isolate, obj->table(), 0, i::JSMapIterator::kKindEntries));
}
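The entries flavor interleaves keys and values, which the length computation above (times 2 for kKindEntries) reflects. A small usage sketch (illustrative; map is an assumed v8::Local<v8::Map>):

// For a JS Map with entries {a -> 1, b -> 2}:
v8::Local<v8::Array> arr = map->AsArray();
// arr->Length() == 4, laid out as [a, 1, b, 2]: key i sits at index 2 * i
// and its value at index 2 * i + 1.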
@ -7189,7 +7256,7 @@ void Set::Clear() {
auto self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
LOG_API(isolate, Set, Clear);
ENTER_V8(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
i::JSSet::Clear(self);
}
@ -7232,15 +7299,13 @@ Maybe<bool> Set::Delete(Local<Context> context, Local<Value> key) {
return Just(result->IsTrue(isolate));
}
Local<Array> Set::AsArray() const {
i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
namespace {
i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object* table_obj,
int offset) {
i::Factory* factory = isolate->factory();
LOG_API(isolate, Set, AsArray);
ENTER_V8(isolate);
i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(obj->table()));
int length = table->NumberOfElements();
i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(table_obj));
int length = table->NumberOfElements() - offset;
if (length <= 0) return factory->NewJSArray(0);
i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
int result_index = 0;
{
@ -7250,14 +7315,22 @@ Local<Array> Set::AsArray() const {
for (int i = 0; i < capacity; ++i) {
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
if (offset-- > 0) continue;
result->set(result_index++, key);
}
}
DCHECK_EQ(result_index, result->length());
DCHECK_EQ(result_index, length);
i::Handle<i::JSArray> result_array =
factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
return Utils::ToLocal(result_array);
return factory->NewJSArrayWithElements(result, i::FAST_ELEMENTS, length);
}
} // namespace
Local<Array> Set::AsArray() const {
i::Handle<i::JSSet> obj = Utils::OpenHandle(this);
i::Isolate* isolate = obj->GetIsolate();
LOG_API(isolate, Set, AsArray);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
return Utils::ToLocal(SetAsArray(isolate, obj->table(), 0));
}
@ -7374,7 +7447,7 @@ bool Promise::HasHandler() {
i::Handle<i::JSReceiver> promise = Utils::OpenHandle(this);
i::Isolate* isolate = promise->GetIsolate();
LOG_API(isolate, Promise, HasRejectHandler);
ENTER_V8(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
if (promise->IsJSPromise()) {
i::Handle<i::JSPromise> js_promise = i::Handle<i::JSPromise>::cast(promise);
return js_promise->has_handler();
@ -7502,11 +7575,8 @@ MaybeLocal<WasmCompiledModule> WasmCompiledModule::Compile(Isolate* isolate,
size_t length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::wasm::ErrorThrower thrower(i_isolate, "WasmCompiledModule::Deserialize()");
i::MaybeHandle<i::JSObject> maybe_compiled =
i::wasm::CreateModuleObjectFromBytes(
i_isolate, start, start + length, &thrower,
i::wasm::ModuleOrigin::kWasmOrigin, i::Handle<i::Script>::null(),
i::Vector<const uint8_t>::empty());
i::MaybeHandle<i::JSObject> maybe_compiled = i::wasm::SyncCompile(
i_isolate, &thrower, i::wasm::ModuleWireBytes(start, start + length));
if (maybe_compiled.is_null()) return MaybeLocal<WasmCompiledModule>();
return Local<WasmCompiledModule>::Cast(
Utils::ToLocal(maybe_compiled.ToHandleChecked()));
@ -7960,6 +8030,18 @@ v8::Local<v8::Context> Isolate::GetEnteredContext() {
return Utils::ToLocal(i::Handle<i::Context>::cast(last));
}
v8::Local<v8::Context> Isolate::GetEnteredOrMicrotaskContext() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
i::Handle<i::Object> last;
if (isolate->handle_scope_implementer()
->MicrotaskContextIsLastEnteredContext()) {
last = isolate->handle_scope_implementer()->MicrotaskContext();
} else {
last = isolate->handle_scope_implementer()->LastEnteredContext();
}
if (last.is_null()) return Local<Context>();
return Utils::ToLocal(i::Handle<i::Context>::cast(last));
}
v8::Local<Value> Isolate::ThrowException(v8::Local<v8::Value> value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@ -8125,6 +8207,7 @@ Isolate* Isolate::New(const Isolate::CreateParams& params) {
}
isolate->set_api_external_references(params.external_references);
isolate->set_allow_atomics_wait(params.allow_atomics_wait);
SetResourceConstraints(isolate, params.constraints);
// TODO(jochen): Once we get rid of Isolate::Current(), we can remove this.
Isolate::Scope isolate_scope(v8_isolate);
@ -8534,10 +8617,14 @@ void Isolate::IsolateInBackgroundNotification() {
void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->heap()->MemoryPressureNotification(level, Locker::IsLocked(this));
bool on_isolate_thread =
v8::Locker::IsActive()
? isolate->thread_manager()->IsLockedByCurrentThread()
: i::ThreadId::Current().Equals(isolate->thread_id());
isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
isolate->allocator()->MemoryPressureNotification(level);
isolate->compiler_dispatcher()->MemoryPressureNotification(
level, Locker::IsLocked(this));
isolate->compiler_dispatcher()->MemoryPressureNotification(level,
on_isolate_thread);
}
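The on_isolate_thread distinction exists because this notification may arrive from any thread. A hedged usage sketch (illustrative):

// From a background thread, the test above resolves to false (no Locker
// held by this thread / wrong thread id), so the heap and the compiler
// dispatcher defer their reaction instead of acting on the wrong thread.
isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);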
void Isolate::SetRAILMode(RAILMode rail_mode) {
@ -8885,11 +8972,15 @@ bool Debug::SetDebugEventListener(Isolate* isolate, EventCallback that,
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
i::HandleScope scope(i_isolate);
i::Handle<i::Object> foreign = i_isolate->factory()->undefined_value();
if (that != NULL) {
foreign = i_isolate->factory()->NewForeign(FUNCTION_ADDR(that));
if (that == nullptr) {
i_isolate->debug()->SetDebugDelegate(nullptr, false);
} else {
i::Handle<i::Object> i_data = i_isolate->factory()->undefined_value();
if (!data.IsEmpty()) i_data = Utils::OpenHandle(*data);
i::NativeDebugDelegate* delegate =
new i::NativeDebugDelegate(i_isolate, that, i_data);
i_isolate->debug()->SetDebugDelegate(delegate, true);
}
i_isolate->debug()->SetEventListener(foreign, Utils::OpenHandle(*data, true));
return true;
}
@ -8909,24 +9000,11 @@ bool Debug::CheckDebugBreak(Isolate* isolate) {
return internal_isolate->stack_guard()->CheckDebugBreak();
}
void Debug::SetMessageHandler(Isolate* isolate,
v8::Debug::MessageHandler handler) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
i_isolate->debug()->SetMessageHandler(handler);
}
void Debug::SendCommand(Isolate* isolate,
const uint16_t* command,
int length,
ClientData* client_data) {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
internal_isolate->debug()->EnqueueCommandMessage(
i::Vector<const uint16_t>(command, length), client_data);
}
v8::Debug::MessageHandler handler) {}
void Debug::SendCommand(Isolate* isolate, const uint16_t* command, int length,
ClientData* client_data) {}
MaybeLocal<Value> Debug::Call(Local<Context> context,
v8::Local<v8::Function> fun,
@ -8947,30 +9025,7 @@ MaybeLocal<Value> Debug::Call(Local<Context> context,
}
MaybeLocal<Value> Debug::GetMirror(Local<Context> context,
v8::Local<v8::Value> obj) {
PREPARE_FOR_EXECUTION(context, Debug, GetMirror, Value);
i::Debug* isolate_debug = isolate->debug();
has_pending_exception = !isolate_debug->Load();
RETURN_ON_FAILED_EXECUTION(Value);
i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object());
auto name = isolate->factory()->NewStringFromStaticChars("MakeMirror");
auto fun_obj = i::JSReceiver::GetProperty(debug, name).ToHandleChecked();
auto v8_fun = Utils::CallableToLocal(i::Handle<i::JSFunction>::cast(fun_obj));
const int kArgc = 1;
v8::Local<v8::Value> argv[kArgc] = {obj};
Local<Value> result;
has_pending_exception =
!v8_fun->Call(context, Utils::ToLocal(debug), kArgc, argv)
.ToLocal(&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
void Debug::ProcessDebugMessages(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)->debug()->ProcessDebugMessages(true);
}
void Debug::ProcessDebugMessages(Isolate* isolate) {}
Local<Context> Debug::GetDebugContext(Isolate* isolate) {
return debug::GetDebugContext(isolate);
@ -9012,19 +9067,6 @@ MaybeLocal<Array> Debug::GetInternalProperties(Isolate* v8_isolate,
return Utils::ToLocal(result);
}
bool debug::SetDebugEventListener(Isolate* isolate, debug::EventCallback that,
Local<Value> data) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
i::HandleScope scope(i_isolate);
i::Handle<i::Object> foreign = i_isolate->factory()->undefined_value();
if (that != NULL) {
foreign = i_isolate->factory()->NewForeign(FUNCTION_ADDR(that));
}
i_isolate->debug()->SetEventListener(foreign, Utils::OpenHandle(*data, true));
return true;
}
Local<Context> debug::GetDebugContext(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8(i_isolate);
@ -9060,6 +9102,12 @@ void debug::ChangeBreakOnException(Isolate* isolate, ExceptionBreakState type) {
type != NoBreakOnException);
}
void debug::SetBreakPointsActive(Isolate* v8_isolate, bool is_active) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
isolate->debug()->set_break_points_active(is_active);
}
void debug::SetOutOfMemoryCallback(Isolate* isolate,
OutOfMemoryCallback callback, void* data) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
@ -9077,11 +9125,17 @@ void debug::PrepareStep(Isolate* v8_isolate, StepAction action) {
isolate->debug()->PrepareStep(static_cast<i::StepAction>(action));
}
void debug::ClearStepping(Isolate* v8_isolate) {
bool debug::HasNonBlackboxedFrameOnStack(Isolate* v8_isolate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
// Clear all current stepping setup.
isolate->debug()->ClearStepping();
i::HandleScope scope(isolate);
for (i::StackTraceFrameIterator it(isolate); !it.done(); it.Advance()) {
if (!it.is_javascript()) continue;
if (!isolate->debug()->IsFrameBlackboxed(it.javascript_frame())) {
return true;
}
}
return false;
}
v8::Isolate* debug::Script::GetIsolate() const {
@ -9175,6 +9229,10 @@ bool debug::Script::IsWasm() const {
return Utils::OpenHandle(this)->type() == i::Script::TYPE_WASM;
}
bool debug::Script::IsModule() const {
return Utils::OpenHandle(this)->origin_options().IsModule();
}
namespace {
int GetSmiValue(i::Handle<i::FixedArray> array, int index) {
return i::Smi::cast(array->get(index))->value();
@ -9187,8 +9245,9 @@ bool debug::Script::GetPossibleBreakpoints(
CHECK(!start.IsEmpty());
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
// TODO(clemensh): Return the proper thing once we support wasm breakpoints.
return false;
i::Handle<i::WasmCompiledModule> compiled_module(
i::WasmCompiledModule::cast(script->wasm_compiled_module()));
return compiled_module->GetPossibleBreakpoints(start, end, locations);
}
i::Script::InitLineEnds(script);
@ -9259,26 +9318,6 @@ int debug::Script::GetSourcePosition(const debug::Location& location) const {
return std::min(prev_line_offset + column + 1, line_offset);
}
MaybeLocal<debug::Script> debug::Script::Wrap(v8::Isolate* v8_isolate,
v8::Local<v8::Object> script) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
i::HandleScope handle_scope(isolate);
i::Handle<i::JSReceiver> script_receiver(Utils::OpenHandle(*script));
if (!script_receiver->IsJSValue()) return MaybeLocal<Script>();
i::Handle<i::Object> script_value(
i::Handle<i::JSValue>::cast(script_receiver)->value(), isolate);
if (!script_value->IsScript()) {
return MaybeLocal<Script>();
}
i::Handle<i::Script> script_obj = i::Handle<i::Script>::cast(script_value);
if (script_obj->type() != i::Script::TYPE_NORMAL &&
script_obj->type() != i::Script::TYPE_WASM) {
return MaybeLocal<Script>();
}
return ToApiHandle<debug::Script>(handle_scope.CloseAndEscape(script_obj));
}
debug::WasmScript* debug::WasmScript::Cast(debug::Script* script) {
CHECK(script->IsWasm());
return static_cast<WasmScript*>(script);
@ -9304,8 +9343,26 @@ int debug::WasmScript::NumImportedFunctions() const {
return static_cast<int>(compiled_module->module()->num_imported_functions);
}
std::pair<int, int> debug::WasmScript::GetFunctionRange(
int function_index) const {
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
i::WasmCompiledModule::cast(script->wasm_compiled_module());
DCHECK_LE(0, function_index);
DCHECK_GT(compiled_module->module()->functions.size(), function_index);
i::wasm::WasmFunction& func =
compiled_module->module()->functions[function_index];
DCHECK_GE(i::kMaxInt, func.code_start_offset);
DCHECK_GE(i::kMaxInt, func.code_end_offset);
return std::make_pair(static_cast<int>(func.code_start_offset),
static_cast<int>(func.code_end_offset));
}
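A brief consumer sketch (illustrative): the pair is the function body's start and end offsets within the wasm module bytes, as the DCHECKed casts above suggest.

std::pair<int, int> range = wasm_script->GetFunctionRange(function_index);
int code_start = range.first;   // func.code_start_offset
int code_end = range.second;    // func.code_end_offset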
debug::WasmDisassembly debug::WasmScript::DisassembleFunction(
int function_index) const {
i::DisallowHeapAllocation no_gc;
i::Handle<i::Script> script = Utils::OpenHandle(this);
DCHECK_EQ(i::Script::TYPE_WASM, script->type());
i::WasmCompiledModule* compiled_module =
@ -9319,7 +9376,9 @@ debug::Location::Location(int line_number, int column_number)
CHECK(column_number >= 0);
}
debug::Location::Location() : line_number_(-1), column_number_(-1) {}
debug::Location::Location()
: line_number_(v8::Function::kLineOffsetNotFound),
column_number_(v8::Function::kLineOffsetNotFound) {}
int debug::Location::GetLineNumber() const {
CHECK(line_number_ >= 0);
@ -9369,19 +9428,29 @@ MaybeLocal<UnboundScript> debug::CompileInspectorScript(Isolate* v8_isolate,
result = i::Compiler::GetSharedFunctionInfoForScript(
str, i::Handle<i::Object>(), 0, 0, origin_options,
i::Handle<i::Object>(), isolate->native_context(), NULL, &script_data,
ScriptCompiler::kNoCompileOptions, i::INSPECTOR_CODE, false);
ScriptCompiler::kNoCompileOptions, i::INSPECTOR_CODE);
has_pending_exception = result.is_null();
RETURN_ON_FAILED_EXECUTION(UnboundScript);
}
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
}
void debug::SetAsyncTaskListener(Isolate* v8_isolate,
debug::AsyncTaskListener listener,
void* data) {
void debug::SetDebugDelegate(Isolate* v8_isolate,
debug::DebugDelegate* delegate) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
isolate->debug()->SetAsyncTaskListener(listener, data);
isolate->debug()->SetDebugDelegate(delegate, false);
}
void debug::ResetBlackboxedStateCache(Isolate* v8_isolate,
v8::Local<debug::Script> script) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
i::DisallowHeapAllocation no_gc;
i::SharedFunctionInfo::ScriptIterator iter(Utils::OpenHandle(*script));
while (i::SharedFunctionInfo* info = iter.Next()) {
info->set_computed_debug_is_blackboxed(false);
}
}
int debug::EstimatedValueSize(Isolate* v8_isolate, v8::Local<v8::Value> value) {
@ -9393,6 +9462,81 @@ int debug::EstimatedValueSize(Isolate* v8_isolate, v8::Local<v8::Value> value) {
return i::Handle<i::HeapObject>::cast(object)->Size();
}
v8::MaybeLocal<v8::Array> debug::EntriesPreview(Isolate* v8_isolate,
v8::Local<v8::Value> value,
bool* is_key_value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
ENTER_V8(isolate);
if (value->IsMap()) {
*is_key_value = true;
return value.As<Map>()->AsArray();
}
if (value->IsSet()) {
*is_key_value = false;
return value.As<Set>()->AsArray();
}
i::Handle<i::Object> object = Utils::OpenHandle(*value);
if (object->IsJSWeakCollection()) {
*is_key_value = object->IsJSWeakMap();
return Utils::ToLocal(i::JSWeakCollection::GetEntries(
i::Handle<i::JSWeakCollection>::cast(object), 0));
}
if (object->IsJSMapIterator()) {
i::Handle<i::JSMapIterator> iterator =
i::Handle<i::JSMapIterator>::cast(object);
int iterator_kind = i::Smi::cast(iterator->kind())->value();
*is_key_value = iterator_kind == i::JSMapIterator::kKindEntries;
if (!iterator->HasMore()) return v8::Array::New(v8_isolate);
return Utils::ToLocal(MapAsArray(isolate, iterator->table(),
i::Smi::cast(iterator->index())->value(),
iterator_kind));
}
if (object->IsJSSetIterator()) {
i::Handle<i::JSSetIterator> it = i::Handle<i::JSSetIterator>::cast(object);
*is_key_value = false;
if (!it->HasMore()) return v8::Array::New(v8_isolate);
return Utils::ToLocal(
SetAsArray(isolate, it->table(), i::Smi::cast(it->index())->value()));
}
return v8::MaybeLocal<v8::Array>();
}
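A minimal consumer sketch (illustrative; value is any v8::Local<v8::Value> a debugger wants to preview):

bool is_key_value = false;
v8::Local<v8::Array> preview;
if (debug::EntriesPreview(isolate, value, &is_key_value).ToLocal(&preview)) {
  // When is_key_value is true (maps, weak maps, entry iterators), the
  // array alternates [key0, value0, key1, value1, ...]; otherwise it is a
  // flat list of elements.
}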
MaybeLocal<debug::Script> debug::GeneratorObject::Script() {
i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
i::Object* maybe_script = obj->function()->shared()->script();
if (!maybe_script->IsScript()) return MaybeLocal<debug::Script>();
i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
return ToApiHandle<debug::Script>(script);
}
Local<Function> debug::GeneratorObject::Function() {
i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
return Utils::ToLocal(handle(obj->function()));
}
debug::Location debug::GeneratorObject::SuspendedLocation() {
i::Handle<i::JSGeneratorObject> obj = Utils::OpenHandle(this);
CHECK(obj->is_suspended());
i::Object* maybe_script = obj->function()->shared()->script();
if (!maybe_script->IsScript()) return debug::Location();
i::Handle<i::Script> script(i::Script::cast(maybe_script), obj->GetIsolate());
i::Script::PositionInfo info;
i::Script::GetPositionInfo(script, obj->source_position(), &info,
i::Script::WITH_OFFSET);
return debug::Location(info.line, info.column);
}
bool debug::GeneratorObject::IsSuspended() {
return Utils::OpenHandle(this)->is_suspended();
}
v8::Local<debug::GeneratorObject> debug::GeneratorObject::Cast(
v8::Local<v8::Value> value) {
CHECK(value->IsGeneratorObject());
return ToApiHandle<debug::GeneratorObject>(Utils::OpenHandle(*value));
}
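Taken together, the new GeneratorObject surface supports a pattern like the following sketch (illustrative; Cast CHECKs its argument, so the guard is mandatory):

if (value->IsGeneratorObject()) {
  v8::Local<debug::GeneratorObject> gen =
      debug::GeneratorObject::Cast(value);
  if (gen->IsSuspended()) {
    debug::Location loc = gen->SuspendedLocation();
    // loc marks the yield point; Script() and Function() recover the
    // enclosing script and function.
  }
}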
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
i::Isolate* isolate = node->isolate();
@ -9410,6 +9554,56 @@ Local<String> CpuProfileNode::GetFunctionName() const {
}
}
debug::Coverage::FunctionData::FunctionData(i::CoverageFunction* function,
Local<debug::Script> script)
: function_(function) {
i::Handle<i::Script> i_script = v8::Utils::OpenHandle(*script);
i::Script::PositionInfo start;
i::Script::PositionInfo end;
i::Script::GetPositionInfo(i_script, function->start, &start,
i::Script::WITH_OFFSET);
i::Script::GetPositionInfo(i_script, function->end, &end,
i::Script::WITH_OFFSET);
start_ = Location(start.line, start.column);
end_ = Location(end.line, end.column);
}
uint32_t debug::Coverage::FunctionData::Count() { return function_->count; }
MaybeLocal<String> debug::Coverage::FunctionData::Name() {
return ToApiHandle<String>(function_->name);
}
Local<debug::Script> debug::Coverage::ScriptData::GetScript() {
return ToApiHandle<debug::Script>(script_->script);
}
size_t debug::Coverage::ScriptData::FunctionCount() {
return script_->functions.size();
}
debug::Coverage::FunctionData debug::Coverage::ScriptData::GetFunctionData(
size_t i) {
return FunctionData(&script_->functions.at(i), GetScript());
}
debug::Coverage::~Coverage() { delete coverage_; }
size_t debug::Coverage::ScriptCount() { return coverage_->size(); }
debug::Coverage::ScriptData debug::Coverage::GetScriptData(size_t i) {
return ScriptData(&coverage_->at(i));
}
debug::Coverage debug::Coverage::Collect(Isolate* isolate, bool reset_count) {
return Coverage(i::Coverage::Collect(reinterpret_cast<i::Isolate*>(isolate),
reset_count));
}
void debug::Coverage::TogglePrecise(Isolate* isolate, bool enable) {
i::Coverage::TogglePrecise(reinterpret_cast<i::Isolate*>(isolate), enable);
}
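Since the entire Coverage surface lands in this hunk, an end-to-end sketch may help (illustrative; assumes an entered isolate):

debug::Coverage coverage =
    debug::Coverage::Collect(isolate, /*reset_count=*/false);
for (size_t i = 0; i < coverage.ScriptCount(); ++i) {
  debug::Coverage::ScriptData script = coverage.GetScriptData(i);
  for (size_t j = 0; j < script.FunctionCount(); ++j) {
    debug::Coverage::FunctionData fn = script.GetFunctionData(j);
    uint32_t count = fn.Count();  // invocation count since the last reset
    (void)count;
  }
}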
const char* CpuProfileNode::GetFunctionNameStr() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return node->entry()->name();
@ -9820,6 +10014,11 @@ void HeapProfiler::SetWrapperClassInfoProvider(uint16_t class_id,
callback);
}
void HeapProfiler::SetGetRetainerInfosCallback(
GetRetainerInfosCallback callback) {
reinterpret_cast<i::HeapProfiler*>(this)->SetGetRetainerInfosCallback(
callback);
}
size_t HeapProfiler::GetProfilerMemorySize() {
return reinterpret_cast<i::HeapProfiler*>(this)->

7
deps/v8/src/api.h vendored

@ -109,7 +109,9 @@ class RegisteredExtension {
V(StackFrame, JSObject) \
V(Proxy, JSProxy) \
V(NativeWeakMap, JSWeakMap) \
V(debug::Script, Script)
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
V(Promise, JSPromise)
class Utils {
public:
@ -348,8 +350,7 @@ OPEN_HANDLE_LIST(MAKE_OPEN_HANDLE)
namespace internal {
class DeferredHandles {
class V8_EXPORT_PRIVATE DeferredHandles {
public:
~DeferredHandles();


@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "src/arguments.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {


@ -6,7 +6,7 @@
#define V8_ARGUMENTS_H_
#include "src/allocation.h"
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/tracing/trace-event.h"
namespace v8 {


@ -41,7 +41,7 @@
#include "src/assembler.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@ -590,6 +590,17 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
}
Address Assembler::target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
} // namespace internal
} // namespace v8


@ -3878,8 +3878,10 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
(dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
int U = NeonU(dt);
int imm3 = 1 << NeonSz(dt);
emit(0xFU * B28 | B25 | U * B24 | B23 | d * B22 | imm3 * B19 | vd * B12 |
0xA * B8 | m * B5 | B4 | vm);
}
static int EncodeScalar(NeonDataType dt, int index) {
@ -3928,7 +3930,7 @@ void Assembler::vmov(NeonDataType dt, Register dst, DwVfpRegister src,
int vn, n;
src.split_code(&vn, &n);
int opc1_opc2 = EncodeScalar(dt, index);
int u = (dt & NeonDataTypeUMask) != 0 ? 1 : 0;
int u = NeonU(dt);
emit(0xEEu * B24 | u * B23 | B20 | vn * B16 | dst.code() * B12 | 0xB * B8 |
n * B7 | B4 | opc1_opc2);
}
@ -4209,81 +4211,199 @@ void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
emit(EncodeNeonBinaryBitwiseOp(VORR, dst, src1, src2));
}
void Assembler::vadd(QwNeonRegister dst, const QwNeonRegister src1,
enum FPBinOp {
VADDF,
VSUBF,
VMULF,
VMINF,
VMAXF,
VRECPS,
VRSQRTS,
VCEQF,
VCGEF,
VCGTF
};
static Instr EncodeNeonBinOp(FPBinOp op, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
int op_encoding = 0;
switch (op) {
case VADDF:
op_encoding = 0xD * B8;
break;
case VSUBF:
op_encoding = B21 | 0xD * B8;
break;
case VMULF:
op_encoding = B24 | 0xD * B8 | B4;
break;
case VMINF:
op_encoding = B21 | 0xF * B8;
break;
case VMAXF:
op_encoding = 0xF * B8;
break;
case VRECPS:
op_encoding = 0xF * B8 | B4;
break;
case VRSQRTS:
op_encoding = B21 | 0xF * B8 | B4;
break;
case VCEQF:
op_encoding = 0xE * B8;
break;
case VCGEF:
op_encoding = B24 | 0xE * B8;
break;
case VCGTF:
op_encoding = B24 | B21 | 0xE * B8;
break;
default:
UNREACHABLE();
break;
}
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
return 0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | n * B7 | B6 | m * B5 |
vm | op_encoding;
}
enum IntegerBinOp {
VADD,
VQADD,
VSUB,
VQSUB,
VMUL,
VMIN,
VMAX,
VTST,
VCEQ,
VCGE,
VCGT
};
static Instr EncodeNeonBinOp(IntegerBinOp op, NeonDataType dt,
const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2) {
int op_encoding = 0;
switch (op) {
case VADD:
op_encoding = 0x8 * B8;
break;
case VQADD:
op_encoding = B4;
break;
case VSUB:
op_encoding = B24 | 0x8 * B8;
break;
case VQSUB:
op_encoding = 0x2 * B8 | B4;
break;
case VMUL:
op_encoding = 0x9 * B8 | B4;
break;
case VMIN:
op_encoding = 0x6 * B8 | B4;
break;
case VMAX:
op_encoding = 0x6 * B8;
break;
case VTST:
op_encoding = 0x8 * B8 | B4;
break;
case VCEQ:
op_encoding = B24 | 0x8 * B8 | B4;
break;
case VCGE:
op_encoding = 0x3 * B8 | B4;
break;
case VCGT:
op_encoding = 0x3 * B8;
break;
default:
UNREACHABLE();
break;
}
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int size = NeonSz(dt);
int u = NeonU(dt);
return 0x1E4U * B23 | u * B24 | d * B22 | size * B20 | vn * B16 | vd * B12 |
n * B7 | B6 | m * B5 | vm | op_encoding;
}
static Instr EncodeNeonBinOp(IntegerBinOp op, NeonSize size,
const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2) {
// Map NeonSize values to the signed values in NeonDataType, so the U bit
// will be 0.
return EncodeNeonBinOp(op, static_cast<NeonDataType>(size), dst, src1, src2);
}
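To make the mapping concrete (this assumes the enum layout in constants-arm.h at this revision, where the low two bits of NeonDataType encode the lane size and bit 2 is the U bit):

// Sketch of the invariant the cast relies on:
static_assert(static_cast<int>(Neon16) == static_cast<int>(NeonS16),
              "NeonSize values alias the signed NeonDataType values");
// So NeonU(static_cast<NeonDataType>(Neon16)) == 0 and NeonSz(...) == 1.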
void Assembler::vadd(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vadd(Qn, Qm) SIMD floating point addition.
// Instruction details available in ARM DDI 0406C.b, A8-830.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 | B6 |
m * B5 | vm);
emit(EncodeNeonBinOp(VADDF, dst, src1, src2));
}
void Assembler::vadd(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
void Assembler::vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vadd(Qn, Qm) SIMD integer addition.
// Instruction details available in ARM DDI 0406C.b, A8-828.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | vm);
emit(EncodeNeonBinOp(VADD, size, dst, src1, src2));
}
void Assembler::vsub(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
void Assembler::vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vqadd(Qn, Qm) SIMD integer saturating addition.
// Instruction details available in ARM DDI 0406C.b, A8-996.
emit(EncodeNeonBinOp(VQADD, dt, dst, src1, src2));
}
void Assembler::vsub(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vsub(Qn, Qm) SIMD floating point subtraction.
// Instruction details available in ARM DDI 0406C.b, A8-1086.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E4U * B23 | d * B22 | B21 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 |
B6 | m * B5 | vm);
emit(EncodeNeonBinOp(VSUBF, dst, src1, src2));
}
void Assembler::vsub(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
void Assembler::vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vsub(Qn, Qm) SIMD integer subtraction.
// Instruction details available in ARM DDI 0406C.b, A8-1084.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E6U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | vm);
emit(EncodeNeonBinOp(VSUB, size, dst, src1, src2));
}
void Assembler::vmul(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
void Assembler::vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vqsub(Qn, Qm) SIMD integer saturating subtraction.
// Instruction details available in ARM DDI 0406C.b, A8-1020.
emit(EncodeNeonBinOp(VQSUB, dt, dst, src1, src2));
}
void Assembler::vmul(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmul(Qn, Qm) SIMD floating point multiply.
// Instruction details available in ARM DDI 0406C.b, A8-958.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 | B6 |
m * B5 | B4 | vm);
emit(EncodeNeonBinOp(VMULF, dst, src1, src2));
}
void Assembler::vmul(NeonSize size, QwNeonRegister dst,
@ -4291,43 +4411,7 @@ void Assembler::vmul(NeonSize size, QwNeonRegister dst,
DCHECK(IsEnabled(NEON));
// Qd = vmul(Qn, Qm) SIMD integer multiply.
// Instruction details available in ARM DDI 0406C.b, A8-960.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x9 * B8 |
n * B7 | B6 | m * B5 | B4 | vm);
}
static Instr EncodeNeonMinMax(bool is_min, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int min = is_min ? 1 : 0;
return 0x1E4U * B23 | d * B22 | min * B21 | vn * B16 | vd * B12 | 0xF * B8 |
n * B7 | B6 | m * B5 | vm;
}
static Instr EncodeNeonMinMax(bool is_min, NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int min = is_min ? 1 : 0;
int size = (dt & NeonDataTypeSizeMask) / 2;
int U = dt & NeonDataTypeUMask;
return 0x1E4U * B23 | U | d * B22 | size * B20 | vn * B16 | vd * B12 |
0x6 * B8 | B6 | m * B5 | min * B4 | vm;
emit(EncodeNeonBinOp(VMUL, size, dst, src1, src2));
}
void Assembler::vmin(const QwNeonRegister dst, const QwNeonRegister src1,
@ -4335,7 +4419,7 @@ void Assembler::vmin(const QwNeonRegister dst, const QwNeonRegister src1,
DCHECK(IsEnabled(NEON));
// Qd = vmin(Qn, Qm) SIMD floating point MIN.
// Instruction details available in ARM DDI 0406C.b, A8-928.
emit(EncodeNeonMinMax(true, dst, src1, src2));
emit(EncodeNeonBinOp(VMINF, dst, src1, src2));
}
void Assembler::vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
@ -4343,7 +4427,7 @@ void Assembler::vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
DCHECK(IsEnabled(NEON));
// Qd = vmin(Qn, Qm) SIMD integer MIN.
// Instruction details available in ARM DDI 0406C.b, A8-926.
emit(EncodeNeonMinMax(true, dt, dst, src1, src2));
emit(EncodeNeonBinOp(VMIN, dt, dst, src1, src2));
}
void Assembler::vmax(QwNeonRegister dst, QwNeonRegister src1,
@ -4351,7 +4435,7 @@ void Assembler::vmax(QwNeonRegister dst, QwNeonRegister src1,
DCHECK(IsEnabled(NEON));
// Qd = vmax(Qn, Qm) SIMD floating point MAX.
// Instruction details available in ARM DDI 0406C.b, A8-928.
emit(EncodeNeonMinMax(false, dst, src1, src2));
emit(EncodeNeonBinOp(VMAXF, dst, src1, src2));
}
void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
@ -4359,7 +4443,49 @@ void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
DCHECK(IsEnabled(NEON));
// Qd = vmax(Qn, Qm) SIMD integer MAX.
// Instruction details available in ARM DDI 0406C.b, A8-926.
emit(EncodeNeonMinMax(false, dt, dst, src1, src2));
emit(EncodeNeonBinOp(VMAX, dt, dst, src1, src2));
}
enum NeonShiftOp { VSHL, VSHR };
static Instr EncodeNeonShiftOp(NeonShiftOp op, NeonDataType dt,
QwNeonRegister dst, QwNeonRegister src,
int shift) {
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int size_in_bits = kBitsPerByte << NeonSz(dt);
int op_encoding = 0;
int imm6 = 0;
if (op == VSHL) {
DCHECK(shift >= 0 && size_in_bits > shift);
imm6 = size_in_bits + shift;
op_encoding = 0x5 * B8;
} else {
DCHECK_EQ(VSHR, op);
DCHECK(shift > 0 && size_in_bits >= shift);
imm6 = 2 * size_in_bits - shift;
op_encoding = NeonU(dt) * B24;
}
return 0x1E5U * B23 | d * B22 | imm6 * B16 | vd * B12 | B6 | m * B5 | B4 |
vm | op_encoding;
}
void Assembler::vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
int shift) {
DCHECK(IsEnabled(NEON));
// Qd = vshl(Qm, bits) SIMD shift left immediate.
// Instruction details available in ARM DDI 0406C.b, A8-1046.
emit(EncodeNeonShiftOp(VSHL, dt, dst, src, shift));
}
void Assembler::vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src,
int shift) {
DCHECK(IsEnabled(NEON));
// Qd = vshr(Qm, bits) SIMD shift right immediate.
// Instruction details available in ARM DDI 0406C.b, A8-1052.
emit(EncodeNeonShiftOp(VSHR, dt, dst, src, shift));
}
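A worked encoding example (illustrative; assumes an Assembler assm and Q-registers q0/q1 as declared in this file):

assm.vshl(NeonS16, q0, q1, 3);  // 16-bit lanes: imm6 = 16 + 3 = 19
assm.vshr(NeonU16, q0, q1, 3);  // imm6 = 2 * 16 - 3 = 29, with the U bit set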
static Instr EncodeNeonEstimateOp(bool is_rsqrt, QwNeonRegister dst,
@ -4373,158 +4499,90 @@ static Instr EncodeNeonEstimateOp(bool is_rsqrt, QwNeonRegister dst,
rsqrt * B7 | B6 | m * B5 | vm;
}
void Assembler::vrecpe(const QwNeonRegister dst, const QwNeonRegister src) {
void Assembler::vrecpe(QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vrecpe(Qm) SIMD reciprocal estimate.
// Instruction details available in ARM DDI 0406C.b, A8-1024.
emit(EncodeNeonEstimateOp(false, dst, src));
}
void Assembler::vrsqrte(const QwNeonRegister dst, const QwNeonRegister src) {
void Assembler::vrsqrte(QwNeonRegister dst, QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vrsqrte(Qm) SIMD reciprocal square root estimate.
// Instruction details available in ARM DDI 0406C.b, A8-1038.
emit(EncodeNeonEstimateOp(true, dst, src));
}
static Instr EncodeNeonRefinementOp(bool is_rsqrt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int rsqrt = is_rsqrt ? 1 : 0;
return 0x1E4U * B23 | d * B22 | rsqrt * B21 | vn * B16 | vd * B12 | 0xF * B8 |
n * B7 | B6 | m * B5 | B4 | vm;
}
void Assembler::vrecps(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
void Assembler::vrecps(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vrecps(Qn, Qm) SIMD reciprocal refinement step.
// Instruction details available in ARM DDI 0406C.b, A8-1026.
emit(EncodeNeonRefinementOp(false, dst, src1, src2));
emit(EncodeNeonBinOp(VRECPS, dst, src1, src2));
}
void Assembler::vrsqrts(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
void Assembler::vrsqrts(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vrsqrts(Qn, Qm) SIMD reciprocal square root refinement step.
// Instruction details available in ARM DDI 0406C.b, A8-1040.
emit(EncodeNeonRefinementOp(true, dst, src1, src2));
emit(EncodeNeonBinOp(VRSQRTS, dst, src1, src2));
}
void Assembler::vtst(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vtst(Qn, Qm) SIMD test integer operands.
// Instruction details available in ARM DDI 0406C.b, A8-1098.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | B4 | vm);
emit(EncodeNeonBinOp(VTST, size, dst, src1, src2));
}
void Assembler::vceq(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
void Assembler::vceq(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vceq(Qn, Qm) SIMD floating point compare equal.
// Instruction details available in ARM DDI 0406C.b, A8-844.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | 0xe * B8 | n * B7 | B6 |
m * B5 | vm);
emit(EncodeNeonBinOp(VCEQF, dst, src1, src2));
}
void Assembler::vceq(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
void Assembler::vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vceq(Qn, Qm) SIMD integer compare equal.
// Instruction details available in ARM DDI 0406C.b, A8-844.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E6U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | B4 | vm);
emit(EncodeNeonBinOp(VCEQ, size, dst, src1, src2));
}
static Instr EncodeNeonCompareOp(const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2, Condition cond) {
DCHECK(cond == ge || cond == gt);
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int is_gt = (cond == gt) ? 1 : 0;
return 0x1E6U * B23 | d * B22 | is_gt * B21 | vn * B16 | vd * B12 | 0xe * B8 |
n * B7 | B6 | m * B5 | vm;
}
static Instr EncodeNeonCompareOp(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2, Condition cond) {
DCHECK(cond == ge || cond == gt);
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int size = (dt & NeonDataTypeSizeMask) / 2;
int U = dt & NeonDataTypeUMask;
int is_ge = (cond == ge) ? 1 : 0;
return 0x1E4U * B23 | U | d * B22 | size * B20 | vn * B16 | vd * B12 |
0x3 * B8 | n * B7 | B6 | m * B5 | is_ge * B4 | vm;
}
void Assembler::vcge(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
void Assembler::vcge(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcge(Qn, Qm) SIMD floating point compare greater or equal.
// Instruction details available in ARM DDI 0406C.b, A8-848.
emit(EncodeNeonCompareOp(dst, src1, src2, ge));
emit(EncodeNeonBinOp(VCGEF, dst, src1, src2));
}
void Assembler::vcge(NeonDataType dt, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
void Assembler::vcge(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcge(Qn, Qm) SIMD integer compare greater or equal.
// Instruction details available in ARM DDI 0406C.b, A8-848.
emit(EncodeNeonCompareOp(dt, dst, src1, src2, ge));
emit(EncodeNeonBinOp(VCGE, dt, dst, src1, src2));
}
void Assembler::vcgt(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
void Assembler::vcgt(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcgt(Qn, Qm) SIMD floating point compare greater than.
// Instruction details available in ARM DDI 0406C.b, A8-852.
emit(EncodeNeonCompareOp(dst, src1, src2, gt));
emit(EncodeNeonBinOp(VCGTF, dst, src1, src2));
}
void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcgt(Qn, Qm) SIMD integer compare greater than.
// Instruction details available in ARM DDI 0406C.b, A8-852.
emit(EncodeNeonCompareOp(dt, dst, src1, src2, gt));
emit(EncodeNeonBinOp(VCGT, dt, dst, src1, src2));
}
void Assembler::vext(QwNeonRegister dst, const QwNeonRegister src1,


@ -150,6 +150,7 @@ GENERAL_REGISTERS(DECLARE_REGISTER)
const Register no_reg = {Register::kCode_no_reg};
static const bool kSimpleFPAliasing = false;
static const bool kSimdMaskRegisters = false;
// Single word VFP register.
struct SwVfpRegister {
@ -728,17 +729,10 @@ class Assembler : public AssemblerBase {
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Address constant_pool, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
INLINE(static Address target_address_at(Address pc, Code* code)) {
Address constant_pool = code ? code->constant_pool() : NULL;
return target_address_at(pc, constant_pool);
}
INLINE(static Address target_address_at(Address pc, Code* code));
INLINE(static void set_target_address_at(
Isolate* isolate, Address pc, Code* code, Address target,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
Address constant_pool = code ? code->constant_pool() : NULL;
set_target_address_at(isolate, pc, constant_pool, target,
icache_flush_mode);
}
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
// Return the code target address at a call site from the return address
// of that call in the instruction stream.
@ -1371,47 +1365,44 @@ class Assembler : public AssemblerBase {
void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vadd(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vadd(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vsub(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vsub(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmul(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmul(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmin(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmin(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
void vmax(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmax(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
void vadd(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vadd(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vqadd(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vsub(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vsub(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vqsub(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vmul(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vmul(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vmin(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vmin(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2);
void vmax(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vmax(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2);
void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
void vshr(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift);
// vrecpe and vrsqrte only support floating point lanes.
void vrecpe(const QwNeonRegister dst, const QwNeonRegister src);
void vrsqrte(const QwNeonRegister dst, const QwNeonRegister src);
void vrecps(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vrsqrts(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vtst(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vceq(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vceq(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vcge(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vcge(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
void vcgt(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vcgt(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
void vrecpe(QwNeonRegister dst, QwNeonRegister src);
void vrsqrte(QwNeonRegister dst, QwNeonRegister src);
void vrecps(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vrsqrts(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vceq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vceq(NeonSize size, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2);
void vcge(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vcge(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2);
void vcgt(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vcgt(NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2);
void vext(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2, int bytes);
void vzip(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);


@ -195,9 +195,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmp(r4, Operand(SIMD128_VALUE_TYPE));
__ b(eq, slow);
} else {
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(eq, &heap_number);
@ -208,9 +205,6 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Call runtime on identical symbols since we need to throw a TypeError.
__ cmp(r4, Operand(SYMBOL_TYPE));
__ b(eq, slow);
// Call runtime on identical SIMD values since we must throw a TypeError.
__ cmp(r4, Operand(SIMD128_VALUE_TYPE));
__ b(eq, slow);
// Normally here we fall through to return_equal, but undefined is
// special: (undefined == undefined) == true, but
// (undefined <= undefined) == false! See ECMAScript 11.8.5.
@ -1029,12 +1023,12 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// r2: receiver
// r3: argc
// r4: argv
int marker = type();
StackFrame::Type marker = type();
if (FLAG_enable_embedded_constant_pool) {
__ mov(r8, Operand::Zero());
}
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
__ mov(r7, Operand(StackFrame::TypeToMarker(marker)));
__ mov(r6, Operand(StackFrame::TypeToMarker(marker)));
__ mov(r5,
Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
__ ldr(r5, MemOperand(r5));
@ -1054,11 +1048,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ cmp(r6, Operand::Zero());
__ b(ne, &non_outermost_js);
__ str(fp, MemOperand(r5));
__ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ mov(ip, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
__ bind(&non_outermost_js);
__ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
__ mov(ip, Operand(StackFrame::INNER_JSENTRY_FRAME));
__ bind(&cont);
__ push(ip);
@ -1124,7 +1118,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Check if the current stack frame is marked as the outermost JS frame.
Label non_outermost_js_2;
__ pop(r5);
__ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
__ cmp(r5, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ b(ne, &non_outermost_js_2);
__ mov(r6, Operand::Zero());
__ mov(r5, Operand(ExternalReference(js_entry_sp)));
@ -1153,55 +1147,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
// Ensure that the vector and slot registers won't be clobbered before
// calling the miss handler.
DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::VectorRegister(),
LoadWithVectorDescriptor::SlotRegister()));
NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
r5, &miss);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
}
void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
// Return address is in lr.
Label miss;
Register receiver = LoadDescriptor::ReceiverRegister();
Register index = LoadDescriptor::NameRegister();
Register scratch = r5;
Register result = r0;
DCHECK(!scratch.is(receiver) && !scratch.is(index));
DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) &&
result.is(LoadWithVectorDescriptor::SlotRegister()));
// StringCharAtGenerator doesn't use the result register until it's passed
// the different miss possibilities. If it did, we would have a conflict
// when FLAG_vector_ics is true.
StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
&miss, // When not a string.
&miss, // When not a number.
&miss, // When index out of range.
RECEIVER_IS_STRING);
char_at_generator.GenerateFast(masm);
__ Ret();
StubRuntimeCallHelper call_helper;
char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper);
__ bind(&miss);
PropertyAccessCompiler::TailCallBuiltin(
masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
// Just jump directly to runtime if native RegExp is not selected at compile
// time or if regexp entry in generated code is turned off runtime switch or
@ -1297,7 +1242,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (6) External string. Make it, offset-wise, look like a sequential string.
// Go to (4).
// (7) Short external string or not a string? If yes, bail out to runtime.
// (8) Sliced string. Replace subject with parent. Go to (1).
// (8) Sliced or thin string. Replace subject with parent. Go to (1).
Label seq_string /* 4 */, external_string /* 6 */, check_underlying /* 1 */,
not_seq_nor_cons /* 5 */, not_long_external /* 7 */;
@ -1319,6 +1264,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// (2) Sequential or cons? If not, go to (5).
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kThinStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ cmp(r1, Operand(kExternalStringTag));
@ -1346,10 +1292,10 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ b(ls, &runtime);
__ SmiUntag(r1);
STATIC_ASSERT(4 == kOneByteStringTag);
STATIC_ASSERT(8 == kOneByteStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ and_(r0, r0, Operand(kStringEncodingMask));
__ mov(r3, Operand(r0, ASR, 2), SetCC);
__ mov(r3, Operand(r0, ASR, 3), SetCC);
__ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
ne);
__ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
@ -1583,12 +1529,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask));
__ b(ne, &runtime);
// (8) Sliced string. Replace subject with parent. Go to (4).
// (8) Sliced or thin string. Replace subject with parent. Go to (4).
Label thin_string;
__ cmp(r1, Operand(kThinStringTag));
__ b(eq, &thin_string);
// Load offset into r9 and replace subject string with parent.
__ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset));
__ SmiUntag(r9);
__ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
__ jmp(&check_underlying); // Go to (4).
__ bind(&thin_string);
__ ldr(subject, FieldMemOperand(subject, ThinString::kActualOffset));
__ jmp(&check_underlying); // Go to (4).
#endif // V8_INTERPRETED_REGEXP
}
@ -1750,192 +1703,6 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// Note: feedback_vector and slot are clobbered after the call.
static void IncrementCallCount(MacroAssembler* masm, Register feedback_vector,
Register slot) {
__ add(feedback_vector, feedback_vector,
Operand::PointerOffsetFromSmiKey(slot));
__ add(feedback_vector, feedback_vector,
Operand(FixedArray::kHeaderSize + kPointerSize));
__ ldr(slot, FieldMemOperand(feedback_vector, 0));
__ add(slot, slot, Operand(Smi::FromInt(1)));
__ str(slot, FieldMemOperand(feedback_vector, 0));
}
void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) {
// r0 - number of arguments
// r1 - function
// r3 - slot id
// r2 - vector
// r4 - allocation site (loaded from vector[slot])
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
__ cmp(r1, r5);
__ b(ne, miss);
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, r2, r3);
__ mov(r2, r4);
__ mov(r3, r1);
ArrayConstructorStub stub(masm->isolate());
__ TailCallStub(&stub);
}
void CallICStub::Generate(MacroAssembler* masm) {
// r0 - number of arguments
// r1 - function
// r3 - slot id (Smi)
// r2 - vector
Label extra_checks_or_miss, call, call_function, call_count_incremented;
// The checks. First, does r1 match the recorded monomorphic target?
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize));
// We don't know that we have a weak cell. We might have a private symbol
// or an AllocationSite, but the memory is safe to examine.
// AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
// FixedArray.
// WeakCell::kValueOffset - contains a JSFunction or Smi(0)
// Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
// computed, meaning that it can't appear to be a pointer. If the low bit is
// 0, then hash is computed, but the 0 bit prevents the field from appearing
// to be a pointer.
STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
WeakCell::kValueOffset &&
WeakCell::kValueOffset == Symbol::kHashFieldSlot);
__ ldr(r5, FieldMemOperand(r4, WeakCell::kValueOffset));
__ cmp(r1, r5);
__ b(ne, &extra_checks_or_miss);
// The compare above could have been a SMI/SMI comparison. Guard against this
// convincing us that we have a monomorphic JSFunction.
__ JumpIfSmi(r1, &extra_checks_or_miss);
__ bind(&call_function);
// Increment the call count for monomorphic function calls.
IncrementCallCount(masm, r2, r3);
__ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(),
tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&extra_checks_or_miss);
Label uninitialized, miss, not_allocation_site;
__ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
__ b(eq, &call);
// Verify that r4 contains an AllocationSite
__ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset));
__ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex);
__ b(ne, &not_allocation_site);
// We have an allocation site.
HandleArrayCase(masm, &miss);
__ bind(&not_allocation_site);
// The following cases attempt to handle MISS cases without going to the
// runtime.
if (FLAG_trace_ic) {
__ jmp(&miss);
}
__ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
__ b(eq, &uninitialized);
// We are going megamorphic. If the feedback is a JSFunction, it is fine
// to handle it here. More complex cases are dealt with in the runtime.
__ AssertNotSmi(r4);
__ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
__ b(ne, &miss);
__ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
__ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
__ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
__ bind(&call);
// Increment the call count for megamorphic function calls.
IncrementCallCount(masm, r2, r3);
__ bind(&call_count_incremented);
__ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()),
RelocInfo::CODE_TARGET);
__ bind(&uninitialized);
// We are going monomorphic, provided we actually have a JSFunction.
__ JumpIfSmi(r1, &miss);
// Goto miss case if we do not have a function.
__ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
__ b(ne, &miss);
// Make sure the function is not the Array() function, which requires special
// behavior on MISS.
__ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r4);
__ cmp(r1, r4);
__ b(eq, &miss);
// Make sure the function belongs to the same native context.
__ ldr(r4, FieldMemOperand(r1, JSFunction::kContextOffset));
__ ldr(r4, ContextMemOperand(r4, Context::NATIVE_CONTEXT_INDEX));
__ ldr(ip, NativeContextMemOperand());
__ cmp(r4, ip);
__ b(ne, &miss);
// Store the function. Use a stub since we need a frame for allocation.
// r2 - vector
// r3 - slot
// r1 - function
{
FrameScope scope(masm, StackFrame::INTERNAL);
CreateWeakCellStub create_stub(masm->isolate());
__ SmiTag(r0);
__ Push(r0, r2, r3, cp, r1);
__ CallStub(&create_stub);
__ Pop(r2, r3, cp, r1);
__ Pop(r0);
__ SmiUntag(r0);
}
__ jmp(&call_function);
// We are here because tracing is on or we encountered a MISS case we can't
// handle here.
__ bind(&miss);
GenerateMiss(masm);
__ jmp(&call_count_incremented);
}
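For orientation, here is a rough C++ sketch of the classification this stub performs on the feedback slot; the enum and helper are illustrative stand-ins, not V8 declarations, and the accessor names only approximate the heap API.
// Pseudocode sketch of the dispatch above (hypothetical names).
enum class CallFeedback { kMonomorphic, kArraySite, kMegamorphic, kUninitialized, kMiss };
CallFeedback ClassifyFeedback(Object* slot, JSFunction* callee, Heap* heap) {
  // A single load of WeakCell::kValueOffset is safe for all three possible
  // shapes of the slot; see the STATIC_ASSERTs above.
  if (slot->IsWeakCell() && WeakCell::cast(slot)->value() == callee)
    return CallFeedback::kMonomorphic;
  if (slot == heap->megamorphic_symbol()) return CallFeedback::kMegamorphic;
  if (slot->IsAllocationSite()) return CallFeedback::kArraySite;  // Array() call
  if (slot == heap->uninitialized_symbol()) return CallFeedback::kUninitialized;
  return CallFeedback::kMiss;  // everything else goes to the runtime
}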
void CallICStub::GenerateMiss(MacroAssembler* masm) {
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
// Preserve the number of arguments as Smi.
__ SmiTag(r0);
// Push the receiver and the function and feedback info.
__ Push(r0, r1, r2, r3);
// Call the entry.
__ CallRuntime(Runtime::kCallIC_Miss);
// Move result to r1 and exit the internal frame.
__ mov(r1, r0);
// Restore number of arguments.
__ Pop(r0);
__ SmiUntag(r0);
}
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
// If the receiver is a smi trigger the non-string case.
@@ -2027,45 +1794,6 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
}
// -------------------------------------------------------------------------
// StringCharFromCodeGenerator
void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiShiftSize == 0);
DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1));
__ tst(code_, Operand(kSmiTagMask |
((~String::kMaxOneByteCharCodeU) << kSmiTagSize)));
__ b(ne, &slow_case_);
__ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
// At this point code register contains smi tagged one-byte char code.
__ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
__ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ b(eq, &slow_case_);
__ bind(&exit_);
}
void StringCharFromCodeGenerator::GenerateSlow(
MacroAssembler* masm,
const RuntimeCallHelper& call_helper) {
__ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
__ bind(&slow_case_);
call_helper.BeforeCall(masm);
__ push(code_);
__ CallRuntime(Runtime::kStringCharFromCode);
__ Move(result_, r0);
call_helper.AfterCall(masm);
__ jmp(&exit_);
__ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
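The fast path in GenerateFast above folds two checks into one tst instruction; a minimal sketch of the equivalent predicate, assuming the ARM constants kSmiTag == 0 and kSmiTagSize == 1:
// Any set bit under the mask means "not a smi" or "code > 0xFF", i.e. the
// slow case; a zero result means the single-character cache can be probed.
bool IsCacheableCharCode(uint32_t tagged_word) {
  uint32_t mask = kSmiTagMask | (~String::kMaxOneByteCharCodeU << kSmiTagSize);
  return (tagged_word & mask) == 0;
}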
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
@@ -2924,15 +2652,10 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Ret();
}
void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadFeedbackVector(r2);
CallICStub stub(isolate(), state());
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm);
predictable.ExpectSize(masm->CallStubSize(&stub) +
2 * Assembler::kInstrSize);
@@ -3288,495 +3011,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : function
// -- cp : context
// -- fp : frame pointer
// -- lr : return address
// -----------------------------------
__ AssertFunction(r1);
// Make r2 point to the JavaScript frame.
__ mov(r2, fp);
if (skip_stub_frame()) {
// For Ignition we need to skip the handler/stub frame to reach the
// JavaScript frame for the function.
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
}
if (FLAG_debug_code) {
Label ok;
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
__ b(eq, &ok);
__ Abort(kInvalidFrameForFastNewRestArgumentsStub);
__ bind(&ok);
}
// Check if we have rest parameters (only possible if we have an
// arguments adaptor frame below the function frame).
Label no_rest_parameters;
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ ldr(ip, MemOperand(r2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &no_rest_parameters);
// Check if the arguments adaptor frame contains more arguments than
// specified by the function's internal formal parameter count.
Label rest_parameters;
__ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ sub(r0, r0, r3, SetCC);
__ b(gt, &rest_parameters);
// Return an empty rest parameter array.
__ bind(&no_rest_parameters);
{
// ----------- S t a t e -------------
// -- cp : context
// -- lr : return address
// -----------------------------------
// Allocate an empty rest parameter array.
Label allocate, done_allocate;
__ Allocate(JSArray::kSize, r0, r1, r2, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the rest parameter array in r0.
__ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
__ str(r1, FieldMemOperand(r0, JSArray::kMapOffset));
__ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
__ str(r1, FieldMemOperand(r0, JSArray::kPropertiesOffset));
__ str(r1, FieldMemOperand(r0, JSArray::kElementsOffset));
__ mov(r1, Operand(0));
__ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
__ Ret();
// Fall back to %AllocateInNewSpace.
__ bind(&allocate);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(Smi::FromInt(JSArray::kSize));
__ CallRuntime(Runtime::kAllocateInNewSpace);
}
__ jmp(&done_allocate);
}
__ bind(&rest_parameters);
{
// Compute the pointer to the first rest parameter (skipping the receiver).
__ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
__ add(r2, r2,
Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
// ----------- S t a t e -------------
// -- cp : context
// -- r0 : number of rest parameters (tagged)
// -- r1 : function
// -- r2 : pointer to first rest parameters
// -- lr : return address
// -----------------------------------
// Allocate space for the rest parameter array plus the backing store.
Label allocate, done_allocate;
__ mov(r6, Operand(JSArray::kSize + FixedArray::kHeaderSize));
__ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
__ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in r3.
__ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
__ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
__ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ add(r4, r3, Operand(FixedArray::kHeaderSize));
{
Label loop, done_loop;
__ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
__ bind(&loop);
__ cmp(r4, r1);
__ b(eq, &done_loop);
__ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
__ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
__ add(r4, r4, Operand(1 * kPointerSize));
__ b(&loop);
__ bind(&done_loop);
}
// Setup the rest parameter array in r4.
__ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r1);
__ str(r1, FieldMemOperand(r4, JSArray::kMapOffset));
__ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
__ str(r1, FieldMemOperand(r4, JSArray::kPropertiesOffset));
__ str(r3, FieldMemOperand(r4, JSArray::kElementsOffset));
__ str(r0, FieldMemOperand(r4, JSArray::kLengthOffset));
STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
__ mov(r0, r4);
__ Ret();
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
__ cmp(r6, Operand(kMaxRegularHeapObjectSize));
__ b(gt, &too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r6);
__ Push(r0, r2, r6);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ mov(r3, r0);
__ Pop(r0, r2);
}
__ jmp(&done_allocate);
// Fall back to %NewRestParameter.
__ bind(&too_big_for_new_space);
__ push(r1);
__ TailCallRuntime(Runtime::kNewRestParameter);
}
}
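A short sketch of the count logic above (plain C++, not V8 source): rest parameters can exist only when an arguments adaptor frame reports more actual arguments than the function's formal parameter count.
int ComputeRestCount(int actual_args, int formal_params, bool has_adaptor_frame) {
  if (!has_adaptor_frame) return 0;        // no extra arguments possible
  int rest = actual_args - formal_params;  // the sub ... SetCC above
  return rest > 0 ? rest : 0;              // b(gt, &rest_parameters)
}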
void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : function
// -- cp : context
// -- fp : frame pointer
// -- lr : return address
// -----------------------------------
__ AssertFunction(r1);
// Make r9 point to the JavaScript frame.
__ mov(r9, fp);
if (skip_stub_frame()) {
// For Ignition we need to skip the handler/stub frame to reach the
// JavaScript frame for the function.
__ ldr(r9, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
}
if (FLAG_debug_code) {
Label ok;
__ ldr(ip, MemOperand(r9, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
__ b(eq, &ok);
__ Abort(kInvalidFrameForFastNewRestArgumentsStub);
__ bind(&ok);
}
// TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset));
__ add(r3, r9, Operand(r2, LSL, kPointerSizeLog2 - 1));
__ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset));
// r1 : function
// r2 : number of parameters (tagged)
// r3 : parameters pointer
// r9 : JavaScript frame pointer
// Registers used over whole function:
// r5 : arguments count (tagged)
// r6 : mapped parameter count (tagged)
// Check if the calling frame is an arguments adaptor frame.
Label adaptor_frame, try_allocate, runtime;
__ ldr(r4, MemOperand(r9, StandardFrameConstants::kCallerFPOffset));
__ ldr(r0, MemOperand(r4, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(r0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &adaptor_frame);
// No adaptor, parameter count = argument count.
__ mov(r5, r2);
__ mov(r6, r2);
__ b(&try_allocate);
// We have an adaptor frame. Patch the parameters pointer.
__ bind(&adaptor_frame);
__ ldr(r5, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ add(r4, r4, Operand(r5, LSL, 1));
__ add(r3, r4, Operand(StandardFrameConstants::kCallerSPOffset));
// r5 = argument count (tagged)
// r6 = parameter count (tagged)
// Compute the mapped parameter count = min(r6, r5) in r6.
__ mov(r6, r2);
__ cmp(r6, Operand(r5));
__ mov(r6, Operand(r5), LeaveCC, gt);
__ bind(&try_allocate);
// Compute the sizes of backing store, parameter map, and arguments object.
// 1. Parameter map, has 2 extra words containing context and backing store.
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
// If there are no mapped parameters, we do not need the parameter_map.
__ cmp(r6, Operand(Smi::kZero));
__ mov(r9, Operand::Zero(), LeaveCC, eq);
__ mov(r9, Operand(r6, LSL, 1), LeaveCC, ne);
__ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne);
// 2. Backing store.
__ add(r9, r9, Operand(r5, LSL, 1));
__ add(r9, r9, Operand(FixedArray::kHeaderSize));
// 3. Arguments object.
__ add(r9, r9, Operand(JSSloppyArgumentsObject::kSize));
// Do the allocation of all three objects in one go.
__ Allocate(r9, r0, r9, r4, &runtime, NO_ALLOCATION_FLAGS);
// r0 = address of new object(s) (tagged)
// r2 = argument count (smi-tagged)
// Get the arguments boilerplate from the current native context into r4.
const int kNormalOffset =
Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
const int kAliasedOffset =
Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
__ ldr(r4, NativeContextMemOperand());
__ cmp(r6, Operand::Zero());
__ ldr(r4, MemOperand(r4, kNormalOffset), eq);
__ ldr(r4, MemOperand(r4, kAliasedOffset), ne);
// r0 = address of new object (tagged)
// r2 = argument count (smi-tagged)
// r4 = address of arguments map (tagged)
// r6 = mapped parameter count (tagged)
__ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
__ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
__ str(r9, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r9, FieldMemOperand(r0, JSObject::kElementsOffset));
// Set up the callee in-object property.
__ AssertNotSmi(r1);
__ str(r1, FieldMemOperand(r0, JSSloppyArgumentsObject::kCalleeOffset));
// Use the length (smi tagged) and set that as an in-object property too.
__ AssertSmi(r5);
__ str(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
// Set up the elements pointer in the allocated arguments object.
// If we allocated a parameter map, r4 will point there, otherwise
// it will point to the backing store.
__ add(r4, r0, Operand(JSSloppyArgumentsObject::kSize));
__ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
// r0 = address of new object (tagged)
// r2 = argument count (tagged)
// r4 = address of parameter map or backing store (tagged)
// r6 = mapped parameter count (tagged)
// Initialize parameter map. If there are no mapped arguments, we're done.
Label skip_parameter_map;
__ cmp(r6, Operand(Smi::kZero));
// Move backing store address to r1, because it is
// expected there when filling in the unmapped arguments.
__ mov(r1, r4, LeaveCC, eq);
__ b(eq, &skip_parameter_map);
__ LoadRoot(r5, Heap::kSloppyArgumentsElementsMapRootIndex);
__ str(r5, FieldMemOperand(r4, FixedArray::kMapOffset));
__ add(r5, r6, Operand(Smi::FromInt(2)));
__ str(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
__ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize));
__ add(r5, r4, Operand(r6, LSL, 1));
__ add(r5, r5, Operand(kParameterMapHeaderSize));
__ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize));
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_parameter_count slots. They index the context,
// where parameters are stored in reverse order, at
// MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
// The mapped parameters thus need to get indices
// MIN_CONTEXT_SLOTS+parameter_count-1 ..
// MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
// We loop from right to left.
Label parameters_loop, parameters_test;
__ mov(r5, r6);
__ add(r9, r2, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
__ sub(r9, r9, Operand(r6));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ add(r1, r4, Operand(r5, LSL, 1));
__ add(r1, r1, Operand(kParameterMapHeaderSize));
// r1 = address of backing store (tagged)
// r4 = address of parameter map (tagged), which is also the address of new
// object + JSSloppyArgumentsObject::kSize (tagged)
// r0 = temporary scratch (among others, for address calculation)
// r5 = loop variable (tagged)
// ip = the hole value
__ jmp(&parameters_test);
__ bind(&parameters_loop);
__ sub(r5, r5, Operand(Smi::FromInt(1)));
__ mov(r0, Operand(r5, LSL, 1));
__ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag));
__ str(r9, MemOperand(r4, r0));
__ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
__ str(ip, MemOperand(r1, r0));
__ add(r9, r9, Operand(Smi::FromInt(1)));
__ bind(&parameters_test);
__ cmp(r5, Operand(Smi::kZero));
__ b(ne, &parameters_loop);
// Restore r0 = new object (tagged) and r5 = argument count (tagged).
__ sub(r0, r4, Operand(JSSloppyArgumentsObject::kSize));
__ ldr(r5, FieldMemOperand(r0, JSSloppyArgumentsObject::kLengthOffset));
__ bind(&skip_parameter_map);
// r0 = address of new object (tagged)
// r1 = address of backing store (tagged)
// r5 = argument count (tagged)
// r6 = mapped parameter count (tagged)
// r9 = scratch
// Copy arguments header and remaining slots (if there are any).
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
__ str(r9, FieldMemOperand(r1, FixedArray::kMapOffset));
__ str(r5, FieldMemOperand(r1, FixedArray::kLengthOffset));
Label arguments_loop, arguments_test;
__ sub(r3, r3, Operand(r6, LSL, 1));
__ jmp(&arguments_test);
__ bind(&arguments_loop);
__ sub(r3, r3, Operand(kPointerSize));
__ ldr(r4, MemOperand(r3, 0));
__ add(r9, r1, Operand(r6, LSL, 1));
__ str(r4, FieldMemOperand(r9, FixedArray::kHeaderSize));
__ add(r6, r6, Operand(Smi::FromInt(1)));
__ bind(&arguments_test);
__ cmp(r6, Operand(r5));
__ b(lt, &arguments_loop);
// Return.
__ Ret();
// Do the runtime call to allocate the arguments object.
// r0 = address of new object (tagged)
// r5 = argument count (tagged)
__ bind(&runtime);
__ Push(r1, r3, r5);
__ TailCallRuntime(Runtime::kNewSloppyArguments);
}
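The size computed into r9 before the single Allocate call follows this shape; a hedged C++ sketch (the helper name is hypothetical):
int SloppyArgumentsAllocationSize(int arg_count, int mapped_count) {
  const int kParameterMapHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
  int size = 0;
  if (mapped_count > 0)  // the parameter map is only needed when mapping
    size += kParameterMapHeaderSize + mapped_count * kPointerSize;
  size += FixedArray::kHeaderSize + arg_count * kPointerSize;  // backing store
  size += JSSloppyArgumentsObject::kSize;                      // arguments object
  return size;
}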
void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : function
// -- cp : context
// -- fp : frame pointer
// -- lr : return address
// -----------------------------------
__ AssertFunction(r1);
// Make r2 point to the JavaScript frame.
__ mov(r2, fp);
if (skip_stub_frame()) {
// For Ignition we need to skip the handler/stub frame to reach the
// JavaScript frame for the function.
__ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
}
if (FLAG_debug_code) {
Label ok;
__ ldr(ip, MemOperand(r2, StandardFrameConstants::kFunctionOffset));
__ cmp(ip, r1);
__ b(eq, &ok);
__ Abort(kInvalidFrameForFastNewRestArgumentsStub);
__ bind(&ok);
}
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ldr(r3, MemOperand(r2, StandardFrameConstants::kCallerFPOffset));
__ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
__ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(
r4, SharedFunctionInfo::kFormalParameterCountOffset));
__ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
__ add(r2, r2,
Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
}
__ b(&arguments_done);
__ bind(&arguments_adaptor);
{
__ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ add(r2, r3, Operand(r0, LSL, kPointerSizeLog2 - 1));
__ add(r2, r2,
Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
}
__ bind(&arguments_done);
// ----------- S t a t e -------------
// -- cp : context
// -- r0 : number of rest parameters (tagged)
// -- r1 : function
// -- r2 : pointer to first rest parameters
// -- lr : return address
// -----------------------------------
// Allocate space for the strict arguments object plus the backing store.
Label allocate, done_allocate;
__ mov(r6, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
__ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
__ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
__ bind(&done_allocate);
// Setup the elements array in r3.
__ LoadRoot(r1, Heap::kFixedArrayMapRootIndex);
__ str(r1, FieldMemOperand(r3, FixedArray::kMapOffset));
__ str(r0, FieldMemOperand(r3, FixedArray::kLengthOffset));
__ add(r4, r3, Operand(FixedArray::kHeaderSize));
{
Label loop, done_loop;
__ add(r1, r4, Operand(r0, LSL, kPointerSizeLog2 - 1));
__ bind(&loop);
__ cmp(r4, r1);
__ b(eq, &done_loop);
__ ldr(ip, MemOperand(r2, 1 * kPointerSize, NegPostIndex));
__ str(ip, FieldMemOperand(r4, 0 * kPointerSize));
__ add(r4, r4, Operand(1 * kPointerSize));
__ b(&loop);
__ bind(&done_loop);
}
// Setup the strict arguments object in r4.
__ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r1);
__ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kMapOffset));
__ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex);
__ str(r1, FieldMemOperand(r4, JSStrictArgumentsObject::kPropertiesOffset));
__ str(r3, FieldMemOperand(r4, JSStrictArgumentsObject::kElementsOffset));
__ str(r0, FieldMemOperand(r4, JSStrictArgumentsObject::kLengthOffset));
STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
__ mov(r0, r4);
__ Ret();
// Fall back to %AllocateInNewSpace (if not too big).
Label too_big_for_new_space;
__ bind(&allocate);
__ cmp(r6, Operand(kMaxRegularHeapObjectSize));
__ b(gt, &too_big_for_new_space);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(r6);
__ Push(r0, r2, r6);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ mov(r3, r0);
__ Pop(r0, r2);
}
__ b(&done_allocate);
// Fall back to %NewStrictArguments.
__ bind(&too_big_for_new_space);
__ push(r1);
__ TailCallRuntime(Runtime::kNewStrictArguments);
}
static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
return ref0.address() - ref1.address();
}


@@ -322,6 +322,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register index,
Register result,
Label* call_runtime) {
Label indirect_string_loaded;
__ bind(&indirect_string_loaded);
// Fetch the instance type of the receiver into result register.
__ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -332,17 +335,24 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(eq, &check_sequential);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string;
__ tst(result, Operand(kSlicedNotConsMask));
Label cons_string, thin_string;
__ and_(result, result, Operand(kStringRepresentationMask));
__ cmp(result, Operand(kConsStringTag));
__ b(eq, &cons_string);
__ cmp(result, Operand(kThinStringTag));
__ b(eq, &thin_string);
// Handle slices.
Label indirect_string_loaded;
__ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ add(index, index, Operand::SmiUntag(result));
__ jmp(&indirect_string_loaded);
// Handle thin strings.
__ bind(&thin_string);
__ ldr(string, FieldMemOperand(string, ThinString::kActualOffset));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
@@ -354,10 +364,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(ne, call_runtime);
// Get the first of the two strings and load its instance type.
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ bind(&indirect_string_loaded);
__ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
__ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been

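Conceptually, the rewritten dispatch above implements this unwrapping loop (pseudocode; the predicates and cast-style accessors only approximate V8's API, and the runtime call is elided):
for (;;) {
  if (IsSlicedString(string)) {        // slice: adjust index, follow parent
    index += SlicedString::cast(string)->offset();
    string = SlicedString::cast(string)->parent();
  } else if (IsThinString(string)) {   // thin: follow the actual string
    string = ThinString::cast(string)->actual();
  } else if (IsConsString(string)) {   // only flat cons strings stay on the fast path
    if (ConsString::cast(string)->second()->length() != 0) break;  // call runtime
    string = ConsString::cast(string)->first();
  } else {
    break;                             // sequential or external: done
  }
}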

@@ -327,16 +327,18 @@ enum LFlag {
// NEON data type
enum NeonDataType {
NeonS8 = 0x1, // U = 0, imm3 = 0b001
NeonS16 = 0x2, // U = 0, imm3 = 0b010
NeonS32 = 0x4, // U = 0, imm3 = 0b100
NeonU8 = 1 << 24 | 0x1, // U = 1, imm3 = 0b001
NeonU16 = 1 << 24 | 0x2, // U = 1, imm3 = 0b010
NeonU32 = 1 << 24 | 0x4, // U = 1, imm3 = 0b100
NeonDataTypeSizeMask = 0x7,
NeonDataTypeUMask = 1 << 24
NeonS8 = 0,
NeonS16 = 1,
NeonS32 = 2,
// Gap to make it easier to extract U and size.
NeonU8 = 4,
NeonU16 = 5,
NeonU32 = 6
};
inline int NeonU(NeonDataType dt) { return static_cast<int>(dt) >> 2; }
inline int NeonSz(NeonDataType dt) { return static_cast<int>(dt) & 0x3; }
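A worked example of the new encoding (both values follow directly from the enum: U sits in bit 2, the size in bits 1:0):
//   NeonU(NeonS16) == 0, NeonSz(NeonS16) == 1  -> signed, 1 << 1 = 2-byte lanes
//   NeonU(NeonU32) == 1, NeonSz(NeonU32) == 2  -> unsigned, 1 << 2 = 4-byte lanes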
enum NeonListType {
nlt_1 = 0x7,
nlt_2 = 0xA,


@@ -95,7 +95,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}
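The switch from double to Float64 copies raw bits, which matters for NaN payloads; a minimal stand-alone sketch of the hazard it avoids (assumption: the compiler spills the value through an FP register, e.g. x87, which may quiet a signaling NaN):
#include <cstdint>
#include <cstring>
uint64_t CopyViaDouble(uint64_t bits) {
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  double d2 = d;  // an FP move/load here may quiet a signaling NaN
  uint64_t out;
  std::memcpy(&out, &d2, sizeof(out));
  return out;     // not guaranteed == bits for signaling NaN inputs
}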


@@ -1856,104 +1856,150 @@ static const char* const barrier_option_names[] = {
void Decoder::DecodeSpecialCondition(Instruction* instr) {
switch (instr->SpecialValue()) {
case 4:
if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 2 &&
instr->Bit(6) == 1 && instr->Bit(4) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
case 4: {
int Vd, Vm, Vn;
if (instr->Bit(6) == 0) {
Vd = instr->VFPDRegValue(kDoublePrecision);
Vm = instr->VFPMRegValue(kDoublePrecision);
Vn = instr->VFPNRegValue(kDoublePrecision);
} else {
Vd = instr->VFPDRegValue(kSimd128Precision);
Vm = instr->VFPMRegValue(kSimd128Precision);
Vn = instr->VFPNRegValue(kSimd128Precision);
}
switch (instr->Bits(11, 8)) {
case 0x0: {
if (instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vqadd.s<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
"vqadd.s%d q%d, q%d, q%d", size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
case 0x1: {
if (instr->Bits(21, 20) == 2 && instr->Bit(6) == 1 &&
instr->Bit(4) == 1) {
if (Vm == Vn) {
// vmov Qd, Qm
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmov q%d, q%d", Vd, Vm);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov q%d, q%d", Vd, Vm);
} else {
// vorr Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vorr q%d, q%d, q%d", Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 8) {
const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vadd/vtst.i<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xd && instr->Bit(4) == 0) {
const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vadd/vsub.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x9 && instr->Bit(6) == 1 &&
} else if (instr->Bits(21, 20) == 0 && instr->Bit(6) == 1 &&
instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vmul.i<size> Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmul.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xe && instr->Bits(21, 20) == 0 &&
instr->Bit(4) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vceq.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vceq.f32 q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
instr->Bit(6) == 1 && instr->Bit(4) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vand Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vand q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x3) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
// vcge/vcgt.s<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xf && instr->Bit(20) == 0 &&
instr->Bit(6) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
if (instr->Bit(4) == 1) {
// vrecps/vrsqrts.f32 Qd, Qm, Qn.
const char* op = instr->Bit(21) == 0 ? "vrecps" : "vrsqrts";
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else {
// vmin/max.f32 Qd, Qm, Qn.
const char* op = instr->Bit(21) == 1 ? "vmin" : "vmax";
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 0x6) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vmin/vmax.s<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
case 0x2: {
if (instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vqsub.s<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
"vqsub.s%d q%d, q%d, q%d", size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
case 0x3: {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
// vcge/vcgt.s<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d",
op, size, Vd, Vn, Vm);
break;
}
case 0x6: {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vmin/vmax.s<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d",
op, size, Vd, Vn, Vm);
break;
}
case 0x8: {
const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vadd/vtst.i<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d",
op, size, Vd, Vn, Vm);
break;
}
case 0x9: {
if (instr->Bit(6) == 1 && instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vmul.i<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
"vmul.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
case 0xd: {
if (instr->Bit(4) == 0) {
const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
// vadd/vsub.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
case 0xe: {
if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 0) {
// vceq.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vceq.f32 q%d, q%d, q%d", Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
case 0xf: {
if (instr->Bit(20) == 0 && instr->Bit(6) == 1) {
if (instr->Bit(4) == 1) {
// vrecps/vrsqrts.f32 Qd, Qm, Qn.
const char* op = instr->Bit(21) == 0 ? "vrecps" : "vrsqrts";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else {
// vmin/max.f32 Qd, Qm, Qn.
const char* op = instr->Bit(21) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
}
} else {
Unknown(instr);
}
break;
}
default:
Unknown(instr);
break;
}
break;
}
case 5:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
@@ -1963,7 +2009,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
"vmovl.s%d q%d, d%d", imm3 * 8, Vd, Vm);
} else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
// vext.8 Qd, Qm, Qn, imm4
int imm4 = instr->Bits(11, 8);
@@ -1973,91 +2019,142 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vext.8 q%d, q%d, q%d, #%d",
Vd, Vn, Vm, imm4);
} else if (instr->Bits(11, 7) == 0xA && instr->Bit(4) == 1) {
// vshl.i<size> Qd, Qm, shift
int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
int shift = instr->Bits(21, 16) - size;
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vshl.i%d q%d, q%d, #%d",
size, Vd, Vm, shift);
} else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
// vshr.s<size> Qd, Qm, shift
int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
int shift = 2 * size - instr->Bits(21, 16);
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.s%d q%d, q%d, #%d",
size, Vd, Vm, shift);
} else {
Unknown(instr);
}
break;
case 6:
if (instr->Bits(11, 8) == 8) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
if (instr->Bit(4) == 0) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vsub.i%d q%d, q%d, q%d",
size, Vd, Vn, Vm);
case 6: {
int Vd, Vm, Vn;
if (instr->Bit(6) == 0) {
Vd = instr->VFPDRegValue(kDoublePrecision);
Vm = instr->VFPMRegValue(kDoublePrecision);
Vn = instr->VFPNRegValue(kDoublePrecision);
} else {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vceq.i%d q%d, q%d, q%d",
size, Vd, Vn, Vm);
Vd = instr->VFPDRegValue(kSimd128Precision);
Vm = instr->VFPMRegValue(kSimd128Precision);
Vn = instr->VFPNRegValue(kSimd128Precision);
}
} else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 1 &&
instr->Bit(4) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
switch (instr->Bits(11, 8)) {
case 0x0: {
if (instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vqadd.u<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
"vqadd.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
case 0x1: {
if (instr->Bits(21, 20) == 1 && instr->Bit(4) == 1) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vbsl q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
instr->Bit(4) == 1) {
} else if (instr->Bits(21, 20) == 0 && instr->Bit(4) == 1) {
if (instr->Bit(6) == 0) {
// veor Dd, Dn, Dm
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vn = instr->VFPNRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"veor d%d, d%d, d%d", Vd, Vn, Vm);
} else {
// veor Qd, Qn, Qm
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"veor q%d, q%d, q%d", Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 0xd && instr->Bit(21) == 0 &&
instr->Bit(6) == 1 && instr->Bit(4) == 1) {
// vmul.f32 Qd, Qn, Qm
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xe && instr->Bit(20) == 0 &&
instr->Bit(4) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
// vcge/vcgt.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x3) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
// vcge/vcgt.u<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x6) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vmin/vmax.u<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
case 0x2: {
if (instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vqsub.u<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
"vqsub.u%d q%d, q%d, q%d", size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
case 0x3: {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
// vcge/vcgt.u<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d",
op, size, Vd, Vn, Vm);
break;
}
case 0x6: {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
// vmin/vmax.u<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d",
op, size, Vd, Vn, Vm);
break;
}
case 0x8: {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
if (instr->Bit(4) == 0) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
"vsub.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
} else {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_,
"vceq.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
}
break;
}
case 0xd: {
if (instr->Bit(21) == 0 && instr->Bit(6) == 1 && instr->Bit(4) == 1) {
// vmul.f32 Qd, Qn, Qm
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
case 0xe: {
if (instr->Bit(20) == 0 && instr->Bit(4) == 0) {
const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
// vcge/vcgt.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
}
default:
Unknown(instr);
break;
}
break;
}
case 7:
if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
(instr->Bit(4) == 1)) {
@@ -2067,7 +2164,7 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int Vm = (instr->Bit(5) << 4) | instr->VmValue();
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
"vmovl.u%d q%d, d%d", imm3 * 8, Vd, Vm);
} else if (instr->Opc1Value() == 7 && instr->Bits(21, 20) == 0x3 &&
instr->Bit(4) == 0) {
if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
@@ -2162,15 +2259,24 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
Unknown(instr);
}
} else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) {
// vrecpe/vrsqrte.f32 Qd, Qm.
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
// vrecpe/vrsqrte.f32 Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d", op, Vd, Vm);
} else {
Unknown(instr);
}
} else if (instr->Bits(11, 7) == 0 && instr->Bit(4) == 1) {
// vshr.u<size> Qd, Qm, shift
int size = base::bits::RoundDownToPowerOfTwo32(instr->Bits(21, 16));
int shift = 2 * size - instr->Bits(21, 16);
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vshr.u%d q%d, q%d, #%d",
size, Vd, Vm, shift);
} else {
Unknown(instr);
}
@@ -2184,8 +2290,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int size = instr->Bits(7, 6);
int align = instr->Bits(5, 4);
int Rm = instr->VmValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vst1.%d ", (1 << size) << 3);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vst1.%d ",
(1 << size) << 3);
FormatNeonList(Vd, type);
Print(", ");
FormatNeonMemory(Rn, align, Rm);
@@ -2197,8 +2303,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int size = instr->Bits(7, 6);
int align = instr->Bits(5, 4);
int Rm = instr->VmValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vld1.%d ", (1 << size) << 3);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "vld1.%d ",
(1 << size) << 3);
FormatNeonList(Vd, type);
Print(", ");
FormatNeonMemory(Rn, align, Rm);
@@ -2212,8 +2318,8 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int Rn = instr->Bits(19, 16);
int offset = instr->Bits(11, 0);
if (offset == 0) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d]", Rn);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "pld [r%d]", Rn);
} else if (instr->Bit(23) == 0) {
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"pld [r%d, #-%d]", Rn, offset);
@@ -2225,16 +2331,16 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int option = instr->Bits(3, 0);
switch (instr->Bits(7, 4)) {
case 4:
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"dsb %s", barrier_option_names[option]);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dsb %s",
barrier_option_names[option]);
break;
case 5:
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"dmb %s", barrier_option_names[option]);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "dmb %s",
barrier_option_names[option]);
break;
case 6:
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"isb %s", barrier_option_names[option]);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "isb %s",
barrier_option_names[option]);
break;
default:
Unknown(instr);


@@ -70,27 +70,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewRestParameterDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return r0; }
@@ -142,15 +121,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r3};
Register registers[] = {r1, r0, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r0, r3, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
@@ -179,6 +156,13 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r2 : start index (to support rest parameters)
// r1 : the target to call
Register registers[] = {r1, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -213,13 +197,12 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
void Allocate##Type##Descriptor::InitializePlatformSpecific( \
CallInterfaceDescriptorData* data) { \
data->InitializePlatformSpecific(0, nullptr, nullptr); \
}
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {r1, r3, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@@ -430,6 +413,14 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // loaded new FP
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
} // namespace internal
} // namespace v8


@@ -88,11 +88,11 @@ int MacroAssembler::CallStubSize(
return CallSize(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
}
void MacroAssembler::Call(Address target,
RelocInfo::Mode rmode,
Condition cond,
TargetAddressStorageMode mode) {
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
TargetAddressStorageMode mode,
bool check_constant_pool) {
// Check if we have to emit the constant pool before we block it.
if (check_constant_pool) MaybeCheckConstPool();
// Block constant pool for the call instruction sequence.
BlockConstPoolScope block_const_pool(this);
Label start;
@@ -138,12 +138,10 @@ int MacroAssembler::CallSize(Handle<Code> code,
return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
}
void MacroAssembler::Call(Handle<Code> code,
RelocInfo::Mode rmode,
TypeFeedbackId ast_id,
Condition cond,
TargetAddressStorageMode mode) {
void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
TypeFeedbackId ast_id, Condition cond,
TargetAddressStorageMode mode,
bool check_constant_pool) {
Label start;
bind(&start);
DCHECK(RelocInfo::IsCodeTarget(rmode));
@@ -1146,12 +1144,11 @@ void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
NeonDataType dt, int lane) {
int bytes_per_lane = dt & NeonDataTypeSizeMask; // 1, 2, 4
int log2_bytes_per_lane = bytes_per_lane / 2; // 0, 1, 2
int byte = lane << log2_bytes_per_lane;
int size = NeonSz(dt); // 0, 1, 2
int byte = lane << size;
int double_word = byte >> kDoubleSizeLog2;
int double_byte = byte & (kDoubleSize - 1);
int double_lane = double_byte >> log2_bytes_per_lane;
int double_lane = double_byte >> size;
DwVfpRegister double_source =
DwVfpRegister::from_code(src.code() * 2 + double_word);
vmov(dt, dst, double_source, double_lane);
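A worked example of the lane arithmetic above: extracting lane 5 of a NeonU16 q-register, so size = NeonSz(NeonU16) = 1.
int size = 1;                                // 16-bit lanes
int byte = 5 << size;                        // byte offset 10 in the q-register
int double_word = byte >> kDoubleSizeLog2;   // 10 >> 3 = 1 -> upper d-register
int double_byte = byte & (kDoubleSize - 1);  // 10 & 7 = 2
int double_lane = double_byte >> size;       // 2 >> 1 = lane 1 of that d-register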
@@ -1166,12 +1163,11 @@ void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
Register src_lane, NeonDataType dt, int lane) {
Move(dst, src);
int bytes_per_lane = dt & NeonDataTypeSizeMask; // 1, 2, 4
int log2_bytes_per_lane = bytes_per_lane / 2; // 0, 1, 2
int byte = lane << log2_bytes_per_lane;
int size = NeonSz(dt); // 0, 1, 2
int byte = lane << size;
int double_word = byte >> kDoubleSizeLog2;
int double_byte = byte & (kDoubleSize - 1);
int double_lane = double_byte >> log2_bytes_per_lane;
int double_lane = double_byte >> size;
DwVfpRegister double_dst =
DwVfpRegister::from_code(dst.code() * 2 + double_word);
vmov(dt, double_dst, double_lane, src_lane);
@@ -1399,7 +1395,7 @@ void MacroAssembler::LoadConstantPoolPointerRegister() {
}
void MacroAssembler::StubPrologue(StackFrame::Type type) {
mov(ip, Operand(Smi::FromInt(type)));
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
if (FLAG_enable_embedded_constant_pool) {
LoadConstantPoolPointerRegister();
@@ -1431,15 +1427,15 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
ldr(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
void MacroAssembler::EnterFrame(StackFrame::Type type,
bool load_constant_pool_pointer_reg) {
// r0-r3: preserved
mov(ip, Operand(Smi::FromInt(type)));
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
LoadConstantPoolPointerRegister();
@@ -1494,7 +1490,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
mov(ip, Operand(Smi::FromInt(frame_type)));
mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
PushCommonFrame(ip);
// Reserve room for saved entry sp and code object.
sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
@@ -1539,21 +1535,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
}
void MacroAssembler::InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2) {
SmiTag(scratch1, length);
LoadRoot(scratch2, map_index);
str(scratch1, FieldMemOperand(string, String::kLengthOffset));
mov(scratch1, Operand(String::kEmptyHashField));
str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
str(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM
// Running on the real platform. Use the alignment as mandated by the local
@@ -1921,17 +1902,17 @@ void MacroAssembler::IsObjectNameType(Register object,
b(hi, fail);
}
void MacroAssembler::DebugBreak() {
mov(r0, Operand::Zero());
mov(r1,
Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate())));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
mov(r1, Operand(restart_fp));
ldr(r1, MemOperand(r1));
tst(r1, r1);
Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
ne);
}
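A hedged C++ sketch of what MaybeDropFrames encodes (the helper is hypothetical; the real code jumps to the FrameDropperTrampoline builtin): the debugger stores a frame pointer at debug_restart_fp_address when a function on the stack must be restarted, and a nonzero value reroutes execution.
void MaybeDropFramesSketch(intptr_t* restart_fp_slot) {
  if (*restart_fp_slot != 0) {        // the tst(r1, r1) above
    JumpToFrameDropperTrampoline();   // hypothetical stand-in for the builtin jump
  }
}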
void MacroAssembler::PushStackHandler() {
// Adjust this code if not the case.
STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
@@ -2430,38 +2411,12 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
bind(&done);
}
void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss) {
// Get the prototype or initial map from the function.
ldr(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// If the prototype or initial map is the hole, don't return it and
// simply miss the cache instead. This will allow us to allocate a
// prototype object on-demand in the runtime system.
LoadRoot(ip, Heap::kTheHoleValueRootIndex);
cmp(result, ip);
b(eq, miss);
// If the function does not have an initial map, we're done.
Label done;
CompareObjectType(result, scratch, scratch, MAP_TYPE);
b(ne, &done);
// Get the prototype from the initial map.
ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
// All done.
bind(&done);
}
void MacroAssembler::CallStub(CodeStub* stub,
TypeFeedbackId ast_id,
Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond,
CAN_INLINE_TARGET_ADDRESS, false);
}


@@ -107,12 +107,13 @@ class MacroAssembler: public Assembler {
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Register target, Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode,
Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
bool check_constant_pool = true);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = al,
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS,
bool check_constant_pool = true);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
@@ -714,12 +715,9 @@ class MacroAssembler: public Assembler {
Register scratch,
Label* fail);
// ---------------------------------------------------------------------------
// Debugger Support
// Frame restart support
void MaybeDropFrames();
void DebugBreak();
// ---------------------------------------------------------------------------
// Exception handling
// Push a new stack handler and link into stack handler chain.
@@ -834,14 +832,6 @@ class MacroAssembler: public Assembler {
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
void TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@@ -1430,12 +1420,6 @@ class MacroAssembler: public Assembler {
InvokeFlag flag,
const CallWrapper& call_wrapper);
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
// Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
void InNewSpace(Register object,
Register scratch,

File diff suppressed because it is too large


@@ -14,6 +14,8 @@
#define V8_ARM_SIMULATOR_ARM_H_
#include "src/allocation.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/mutex.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native arm platform.
@@ -302,19 +304,27 @@ class Simulator {
void PrintStopInfo(uint32_t code);
// Read and write memory.
// The *Ex functions are exclusive access. The writes return the strex status:
// 0 if the write succeeds, and 1 if the write fails.
inline uint8_t ReadBU(int32_t addr);
inline int8_t ReadB(int32_t addr);
uint8_t ReadExBU(int32_t addr);
inline void WriteB(int32_t addr, uint8_t value);
inline void WriteB(int32_t addr, int8_t value);
int WriteExB(int32_t addr, uint8_t value);
inline uint16_t ReadHU(int32_t addr, Instruction* instr);
inline int16_t ReadH(int32_t addr, Instruction* instr);
uint16_t ReadExHU(int32_t addr, Instruction* instr);
// Note: Overloaded on the sign of the value.
inline void WriteH(int32_t addr, uint16_t value, Instruction* instr);
inline void WriteH(int32_t addr, int16_t value, Instruction* instr);
int WriteExH(int32_t addr, uint16_t value, Instruction* instr);
inline int ReadW(int32_t addr, Instruction* instr);
int ReadExW(int32_t addr, Instruction* instr);
inline void WriteW(int32_t addr, int value, Instruction* instr);
int WriteExW(int32_t addr, int value, Instruction* instr);
int32_t* ReadDW(int32_t addr);
void WriteDW(int32_t addr, int32_t value1, int32_t value2);
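A usage sketch of the exclusive accessors declared above, modeling the LDREX/STREX retry loop guest code performs (illustrative only; sim and instr stand in for real objects):
int32_t AtomicIncrement(Simulator* sim, int32_t addr, Instruction* instr) {
  int status;
  int32_t value;
  do {
    value = sim->ReadExW(addr, instr);               // ldrex: marks addr exclusive
    status = sim->WriteExW(addr, value + 1, instr);  // strex: 0 means success
  } while (status != 0);                             // retry, as real hardware does
  return value;
}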
@@ -437,6 +447,94 @@ class Simulator {
char* desc;
};
StopCountAndDesc watched_stops_[kNumOfWatchedStops];
// Synchronization primitives. See ARM DDI 0406C.b, A2.9.
enum class MonitorAccess {
Open,
Exclusive,
};
enum class TransactionSize {
None = 0,
Byte = 1,
HalfWord = 2,
Word = 4,
};
// The least-significant bits of the address are ignored. The number of bits
// is implementation-defined, between 3 and 11. See ARM DDI 0406C.b, A3.4.3.
static const int32_t kExclusiveTaggedAddrMask = ~((1 << 11) - 1);
class LocalMonitor {
public:
LocalMonitor();
// These functions manage the state machine for the local monitor, but do
// not actually perform loads and stores. NotifyStoreExcl only returns
// true if the exclusive store is allowed; the global monitor will still
// have to be checked to see whether the memory should be updated.
void NotifyLoad(int32_t addr);
void NotifyLoadExcl(int32_t addr, TransactionSize size);
void NotifyStore(int32_t addr);
bool NotifyStoreExcl(int32_t addr, TransactionSize size);
private:
void Clear();
MonitorAccess access_state_;
int32_t tagged_addr_;
TransactionSize size_;
};
class GlobalMonitor {
public:
GlobalMonitor();
class Processor {
public:
Processor();
private:
friend class GlobalMonitor;
// These functions manage the state machine for the global monitor, but do
// not actually perform loads and stores.
void Clear_Locked();
void NotifyLoadExcl_Locked(int32_t addr);
void NotifyStore_Locked(int32_t addr, bool is_requesting_processor);
bool NotifyStoreExcl_Locked(int32_t addr, bool is_requesting_processor);
MonitorAccess access_state_;
int32_t tagged_addr_;
Processor* next_;
Processor* prev_;
// A strex can fail due to background cache evictions. Rather than
// simulating this, we'll just occasionally introduce cases where an
// exclusive store fails. This will happen once after every
// kMaxFailureCounter exclusive stores.
static const int kMaxFailureCounter = 5;
int failure_counter_;
};
// Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
base::Mutex mutex;
void NotifyLoadExcl_Locked(int32_t addr, Processor* processor);
void NotifyStore_Locked(int32_t addr, Processor* processor);
bool NotifyStoreExcl_Locked(int32_t addr, Processor* processor);
// Called when the simulator is destroyed.
void RemoveProcessor(Processor* processor);
private:
bool IsProcessorInLinkedList_Locked(Processor* processor) const;
void PrependProcessor_Locked(Processor* processor);
Processor* head_;
};
LocalMonitor local_monitor_;
GlobalMonitor::Processor global_monitor_processor_;
static base::LazyInstance<GlobalMonitor>::type global_monitor_;
};
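The local monitor above is a small state machine: a load-exclusive moves it to Exclusive and records the tagged address, a plain store drops it back to Open, and a store-exclusive succeeds only while the tag still matches. A self-contained sketch of those rules, with names mirroring the declarations above (this is not the shipped implementation, and it omits the transaction-size check):

#include <cstdint>

enum class Access { Open, Exclusive };

class MiniLocalMonitor {
 public:
  void NotifyLoadExcl(int32_t addr) {  // ldrex: tag the address
    state_ = Access::Exclusive;
    tagged_addr_ = addr & kExclusiveTaggedAddrMask;
  }
  void NotifyStore(int32_t) { state_ = Access::Open; }  // plain store clears
  bool NotifyStoreExcl(int32_t addr) {  // strex: succeed only if still tagged
    bool ok = state_ == Access::Exclusive &&
              (addr & kExclusiveTaggedAddrMask) == tagged_addr_;
    state_ = Access::Open;  // every strex attempt clears the monitor
    return ok;
  }

 private:
  // Ignore the low bits, as kExclusiveTaggedAddrMask does above.
  static const int32_t kExclusiveTaggedAddrMask = ~((1 << 11) - 1);
  Access state_ = Access::Open;
  int32_t tagged_addr_ = 0;
};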

View File

@ -8,7 +8,7 @@
#include "src/arm64/assembler-arm64.h"
#include "src/assembler.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {

View File

@ -198,6 +198,7 @@ struct Register : public CPURegister {
};
static const bool kSimpleFPAliasing = true;
static const bool kSimdMaskRegisters = false;
struct FPRegister : public CPURegister {
enum Code {

File diff suppressed because it is too large.

View File

@ -99,6 +99,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register result,
Label* call_runtime) {
DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
Label indirect_string_loaded;
__ Bind(&indirect_string_loaded);
// Fetch the instance type of the receiver into result register.
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@ -108,17 +111,25 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string;
__ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);
Label cons_string, thin_string;
__ And(result, result, kStringRepresentationMask);
__ Cmp(result, kConsStringTag);
__ B(eq, &cons_string);
__ Cmp(result, kThinStringTag);
__ B(eq, &thin_string);
// Handle slices.
Label indirect_string_loaded;
__ Ldr(result.W(),
UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
__ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ Add(index, index, result.W());
__ B(&indirect_string_loaded);
// Handle thin strings.
__ Bind(&thin_string);
__ Ldr(string, FieldMemOperand(string, ThinString::kActualOffset));
__ B(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
@ -129,10 +140,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
// Get the first of the two strings and load its instance type.
__ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ Bind(&indirect_string_loaded);
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
__ B(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
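Binding indirect_string_loaded at the top turns the dispatch into a loop, so an arbitrary chain of thin, sliced, and cons wrappers is peeled one layer per iteration before the sequential/external check. A rough C++ rendering of that control flow (the struct is a hypothetical stand-in, not V8's real string layout):

// Illustrative model of the unwrapping loop emitted above.
struct Str { int kind; Str* target; int offset; };  // hypothetical stand-in
enum { kSequential, kThin, kSliced, kCons };

Str* Unwrap(Str* s, int* index) {
  for (;;) {  // corresponds to Bind(&indirect_string_loaded)
    switch (s->kind) {
      case kThin:    s = s->target; break;  // ThinString::kActualOffset
      case kSliced:  *index += s->offset;   // add the slice offset
                     s = s->target; break;  // SlicedString::kParentOffset
      case kCons:    s = s->target; break;  // first child; second is empty
      default:       return s;  // sequential or external: done
    }
  }
}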

View File

@ -78,7 +78,7 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
double double_value = input_->GetDoubleRegister(i);
Float64 double_value = input_->GetDoubleRegister(i);
output_frame->SetDoubleRegister(i, double_value);
}
}

View File

@ -71,30 +71,6 @@ void FastNewClosureDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewRestParameterDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewSloppyArgumentsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewStrictArgumentsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function
Register registers[] = {x1};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
// static
const Register TypeConversionDescriptor::ArgumentRegister() { return x0; }
@ -163,15 +139,13 @@ void CallFunctionDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackDescriptor::InitializePlatformSpecific(
void CallICTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x3};
Register registers[] = {x1, x0, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallFunctionWithFeedbackAndVectorDescriptor::InitializePlatformSpecific(
void CallICDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x0, x3, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
@ -200,6 +174,13 @@ void CallTrampolineDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void CallForwardVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: target
// x2: start index (to support rest parameters)
Register registers[] = {x1, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ConstructStubDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -236,13 +217,12 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(0, nullptr, nullptr);
}
#define SIMD128_ALLOC_DESC(TYPE, Type, type, lane_count, lane_type) \
void Allocate##Type##Descriptor::InitializePlatformSpecific( \
CallInterfaceDescriptorData* data) { \
data->InitializePlatformSpecific(0, nullptr, nullptr); \
}
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {x1, x3, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
@ -461,6 +441,14 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FrameDropperTrampolineDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // loaded new FP
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
} // namespace internal
} // namespace v8

View File

@ -1780,23 +1780,6 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
JumpToExternalReference(ExternalReference(fid, isolate()));
}
void MacroAssembler::InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2) {
DCHECK(!AreAliased(string, length, scratch1, scratch2));
LoadRoot(scratch2, map_index);
SmiTag(scratch1, length);
Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
Mov(scratch2, String::kEmptyHashField);
Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
}
int MacroAssembler::ActivationFrameAlignment() {
#if V8_HOST_ARCH_ARM64
// Running on the real platform. Use the alignment as mandated by the local
@ -2618,7 +2601,7 @@ void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
UseScratchRegisterScope temps(this);
frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
Register temp = temps.AcquireX();
Mov(temp, Smi::FromInt(type));
Mov(temp, StackFrame::TypeToMarker(type));
Push(lr, fp);
Mov(fp, StackPointer());
Claim(frame_slots);
@ -2636,8 +2619,8 @@ void MacroAssembler::Prologue(bool code_pre_aging) {
void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
Ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
Ldr(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
Ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
}
@ -2655,7 +2638,7 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
if (type == StackFrame::INTERNAL) {
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
Push(type_reg);
Mov(code_reg, Operand(CodeObject()));
@ -2667,17 +2650,17 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
// jssp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Push(xzr, lr);
Push(fp, type_reg);
Add(fp, csp, TypedFrameConstants::kFixedFrameSizeFromFp);
// csp[3] for alignment
// csp[2] : lr
// csp[1] : fp
// csp[0] : type
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
Mov(fp, csp);
Push(type_reg, xzr);
// csp[3] : lr
// csp[2] : fp
// csp[1] : type
// csp[0] : for alignment
} else {
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp);
Push(type_reg);
Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
@ -2689,12 +2672,19 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(csp, fp);
AssertStackConsistency();
Pop(fp, lr);
} else {
DCHECK(jssp.Is(StackPointer()));
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(jssp, fp);
AssertStackConsistency();
Pop(fp, lr);
}
}
@ -2741,7 +2731,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Set up the new stack frame.
Push(lr, fp);
Mov(fp, StackPointer());
Mov(scratch, Smi::FromInt(frame_type));
Mov(scratch, StackFrame::TypeToMarker(frame_type));
Push(scratch);
Push(xzr);
Mov(scratch, Operand(CodeObject()));
@ -2888,16 +2878,17 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
void MacroAssembler::DebugBreak() {
Mov(x0, 0);
Mov(x1, ExternalReference(Runtime::kHandleDebuggerStatement, isolate()));
CEntryStub ces(isolate(), 1);
DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT);
void MacroAssembler::MaybeDropFrames() {
// Check whether we need to drop frames to restart a function on the stack.
ExternalReference restart_fp =
ExternalReference::debug_restart_fp_address(isolate());
Mov(x1, Operand(restart_fp));
Ldr(x1, MemOperand(x1));
Tst(x1, x1);
Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
ne);
}
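MaybeDropFrames replaces the old DebugBreak stub call: the debugger publishes a frame pointer through debug_restart_fp_address, and the check jumps to the FrameDropperTrampoline whenever that slot is non-zero. A plain-C++ paraphrase of the check above (the trampoline helper is hypothetical, declared only for the sketch):

#include <cstdint>

void JumpToFrameDropperTrampoline(intptr_t fp);  // hypothetical trampoline

// Sketch of the restart-frame check the assembly above performs.
void MaybeDropFrames(intptr_t* restart_fp_slot) {
  intptr_t restart_fp = *restart_fp_slot;  // debug_restart_fp_address slot
  if (restart_fp != 0) {
    // Unwind to restart_fp and re-enter the function being restarted.
    JumpToFrameDropperTrampoline(restart_fp);
  }
}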
void MacroAssembler::PushStackHandler() {
DCHECK(jssp.Is(StackPointer()));
// Adjust this code if the asserts don't hold.
@ -3407,32 +3398,6 @@ void MacroAssembler::GetMapConstructor(Register result, Register map,
Bind(&done);
}
void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss) {
DCHECK(!AreAliased(function, result, scratch));
// Get the prototype or initial map from the function.
Ldr(result,
FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// If the prototype or initial map is the hole, don't return it and simply
// miss the cache instead. This will allow us to allocate a prototype object
// on-demand in the runtime system.
JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
// If the function does not have an initial map, we're done.
Label done;
JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
// Get the prototype from the initial map.
Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
// All done.
Bind(&done);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
@ -4645,9 +4610,8 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
}
}
InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
: reg_(NoReg), smi_check_(NULL) {
: reg_(NoReg), smi_check_delta_(0), smi_check_(NULL) {
InstructionSequence* inline_data = InstructionSequence::At(info);
DCHECK(inline_data->IsInlineData());
if (inline_data->IsInlineData()) {
@ -4659,9 +4623,9 @@ InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
uint32_t payload32 = static_cast<uint32_t>(payload);
int reg_code = RegisterBits::decode(payload32);
reg_ = Register::XRegFromCode(reg_code);
int smi_check_delta = DeltaBits::decode(payload32);
DCHECK(smi_check_delta != 0);
smi_check_ = inline_data->preceding(smi_check_delta);
smi_check_delta_ = DeltaBits::decode(payload32);
DCHECK_NE(0, smi_check_delta_);
smi_check_ = inline_data->preceding(smi_check_delta_);
}
}
}
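InlineSmiCheckInfo now stores the decoded delta in smi_check_delta_ (exposed via SmiCheckDelta below) rather than in a local. The payload packs a register code and a non-zero instruction delta into 32 bits through the RegisterBits and DeltaBits fields; the decode is the usual shift-and-mask, sketched here with an assumed layout (the real field widths live in the class's private section and may differ):

#include <cstdint>

// Generic BitField-style decode, as used for the InlineData payload.
template <int kShift, int kSize>
struct Field {
  static uint32_t decode(uint32_t word) {
    return (word >> kShift) & ((1u << kSize) - 1);
  }
};

// Assumed layout, for illustration only: 5 register bits, then the delta.
using RegisterBits = Field<0, 5>;
using DeltaBits = Field<5, 27>;
// DeltaBits::decode(payload32) then yields smi_check_delta_.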

View File

@ -1300,12 +1300,9 @@ class MacroAssembler : public Assembler {
MacroAssembler* masm_;
};
// ---------------------------------------------------------------------------
// Debugger Support
// Frame restart support
void MaybeDropFrames();
void DebugBreak();
// ---------------------------------------------------------------------------
// Exception handling
// Push a new stack handler and link into stack handler chain.
@ -1371,9 +1368,6 @@ class MacroAssembler : public Assembler {
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
void TryGetFunctionPrototype(Register function, Register result,
Register scratch, Label* miss);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@ -2002,12 +1996,6 @@ class MacroAssembler : public Assembler {
CPURegList tmp_list_;
CPURegList fptmp_list_;
void InitializeNewString(Register string,
Register length,
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2);
public:
// Far branches resolving.
//
@ -2157,6 +2145,8 @@ class InlineSmiCheckInfo {
return smi_check_;
}
int SmiCheckDelta() const { return smi_check_delta_; }
// Use MacroAssembler::InlineData to emit information about patchable inline
// SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
// indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
@ -2174,6 +2164,7 @@ class InlineSmiCheckInfo {
private:
Register reg_;
int smi_check_delta_;
Instruction* smi_check_;
// Fields in the data encoded by InlineData.

View File

@ -9,11 +9,13 @@
#include "src/asmjs/asm-typer.h"
#include "src/asmjs/asm-wasm-builder.h"
#include "src/assert-scope.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/compilation-info.h"
#include "src/execution.h"
#include "src/factory.h"
#include "src/handles.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/objects.h"
#include "src/parsing/parse-info.h"
@ -186,11 +188,14 @@ MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
base::ElapsedTimer compile_timer;
compile_timer.Start();
MaybeHandle<JSObject> compiled = wasm::CreateModuleObjectFromBytes(
info->isolate(), module->begin(), module->end(), &thrower,
internal::wasm::kAsmJsOrigin, info->script(), asm_offsets_vec);
MaybeHandle<JSObject> compiled = SyncCompileTranslatedAsmJs(
info->isolate(), &thrower,
wasm::ModuleWireBytes(module->begin(), module->end()), info->script(),
asm_offsets_vec);
DCHECK(!compiled.is_null());
double compile_time = compile_timer.Elapsed().InMillisecondsF();
DCHECK_GE(module->end(), module->begin());
uintptr_t wasm_size = module->end() - module->begin();
wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
Handle<FixedArray> uses_array =
@ -216,10 +221,10 @@ MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
if (FLAG_predictable) {
length = base::OS::SNPrintF(text, arraysize(text), "success");
} else {
length =
base::OS::SNPrintF(text, arraysize(text),
"success, asm->wasm: %0.3f ms, compile: %0.3f ms",
asm_wasm_time, compile_time);
length = base::OS::SNPrintF(
text, arraysize(text),
"success, asm->wasm: %0.3f ms, compile: %0.3f ms, %" PRIuPTR " bytes",
asm_wasm_time, compile_time, wasm_size);
}
DCHECK_NE(-1, length);
USE(length);
@ -271,22 +276,18 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
foreign, NONE);
}
i::MaybeHandle<i::JSObject> maybe_module_object =
i::wasm::WasmModule::Instantiate(isolate, &thrower, module, ffi_object,
memory);
i::MaybeHandle<i::Object> maybe_module_object =
i::wasm::SyncInstantiate(isolate, &thrower, module, ffi_object, memory);
if (maybe_module_object.is_null()) {
return MaybeHandle<Object>();
}
i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
i::Handle<i::Name> init_name(isolate->factory()->InternalizeUtf8String(
wasm::AsmWasmBuilder::foreign_init_name));
i::Handle<i::Object> init =
i::Object::GetProperty(module_object, init_name).ToHandleChecked();
i::Handle<i::Object> module_object = maybe_module_object.ToHandleChecked();
i::MaybeHandle<i::Object> maybe_init =
i::Object::GetProperty(module_object, init_name);
DCHECK(!maybe_init.is_null());
i::Handle<i::Object> init = maybe_init.ToHandleChecked();
i::Handle<i::Object> undefined(isolate->heap()->undefined_value(), isolate);
i::Handle<i::Object>* foreign_args_array =
new i::Handle<i::Object>[foreign_globals->length()];
@ -345,7 +346,9 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
MessageHandler::ReportMessage(isolate, &location, message);
}
return module_object;
Handle<String> exports_name =
isolate->factory()->InternalizeUtf8String("exports");
return i::Object::GetProperty(module_object, exports_name);
}
} // namespace internal

View File

@ -19,6 +19,7 @@
#include "src/codegen.h"
#include "src/globals.h"
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/utils.h"
#include "src/vector.h"
@ -385,6 +386,10 @@ AsmTyper::VariableInfo* AsmTyper::ImportLookup(Property* import) {
return obj_info;
}
if (!key->IsPropertyName()) {
return nullptr;
}
std::unique_ptr<char[]> aname = key->AsPropertyName()->ToCString();
ObjectTypeMap::iterator i = stdlib->find(std::string(aname.get()));
if (i == stdlib->end()) {
@ -569,6 +574,8 @@ bool AsmTyper::ValidateAfterFunctionsPhase() {
void AsmTyper::ClearFunctionNodeTypes() { function_node_types_.clear(); }
AsmType* AsmTyper::TriggerParsingError() { FAIL(root_, "Parsing error"); }
namespace {
bool IsUseAsmDirective(Statement* first_statement) {
ExpressionStatement* use_asm = first_statement->AsExpressionStatement();
@ -1219,10 +1226,12 @@ AsmType* AsmTyper::ValidateFunction(FunctionDeclaration* fun_decl) {
if (as_block != nullptr) {
statements = as_block->statements();
} else {
// We don't check whether AsReturnStatement() below returns non-null --
// we leave that to the ReturnTypeAnnotations method.
if (auto* ret_statement = last_statement->AsReturnStatement()) {
RECURSE(return_type_ =
ReturnTypeAnnotations(last_statement->AsReturnStatement()));
ReturnTypeAnnotations(ret_statement->expression()));
} else {
return_type_ = AsmType::Void();
}
}
}
} while (return_type_ == AsmType::None());
@ -2741,15 +2750,8 @@ AsmType* AsmTyper::ParameterTypeAnnotations(Variable* parameter,
}
// 5.2 ReturnTypeAnnotations
AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
if (statement == nullptr) {
return AsmType::Void();
}
auto* ret_expr = statement->expression();
if (ret_expr == nullptr) {
return AsmType::Void();
}
AsmType* AsmTyper::ReturnTypeAnnotations(Expression* ret_expr) {
DCHECK_NOT_NULL(ret_expr);
if (auto* binop = ret_expr->AsBinaryOperation()) {
if (IsDoubleAnnotation(binop)) {
@ -2757,14 +2759,14 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
} else if (IsIntAnnotation(binop)) {
return AsmType::Signed();
}
FAIL(statement, "Invalid return type annotation.");
FAIL(ret_expr, "Invalid return type annotation.");
}
if (auto* call = ret_expr->AsCall()) {
if (IsCallToFround(call)) {
return AsmType::Float();
}
FAIL(statement, "Invalid function call in return statement.");
FAIL(ret_expr, "Invalid function call in return statement.");
}
if (auto* literal = ret_expr->AsLiteral()) {
@ -2783,28 +2785,46 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
// return undefined
return AsmType::Void();
}
FAIL(statement, "Invalid literal in return statement.");
FAIL(ret_expr, "Invalid literal in return statement.");
}
if (auto* proxy = ret_expr->AsVariableProxy()) {
auto* var_info = Lookup(proxy->var());
if (var_info == nullptr) {
FAIL(statement, "Undeclared identifier in return statement.");
FAIL(ret_expr, "Undeclared identifier in return statement.");
}
if (var_info->mutability() != VariableInfo::kConstGlobal) {
FAIL(statement, "Identifier in return statement is not const.");
FAIL(ret_expr, "Identifier in return statement is not const.");
}
if (!var_info->type()->IsReturnType()) {
FAIL(statement, "Constant in return must be signed, float, or double.");
FAIL(ret_expr, "Constant in return must be signed, float, or double.");
}
return var_info->type();
}
FAIL(statement, "Invalid return type expression.");
// NOTE: This is not strictly valid asm.js, but is emitted by some versions of
// Emscripten.
if (auto* cond = ret_expr->AsConditional()) {
AsmType* a = AsmType::None();
AsmType* b = AsmType::None();
RECURSE(a = ReturnTypeAnnotations(cond->then_expression()));
if (a->IsA(AsmType::None())) {
return a;
}
RECURSE(b = ReturnTypeAnnotations(cond->else_expression()));
if (b->IsA(AsmType::None())) {
return b;
}
if (a->IsExactly(b)) {
return a;
}
}
FAIL(ret_expr, "Invalid return type expression.");
}
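The new Conditional case above accepts Emscripten's pattern of returning a conditional whose arms each carry their own annotation: both arms are typed recursively, failure propagates, and the arms must agree exactly, so cond ? (a|0) : (b|0) types as Signed while cond ? (a|0) : (+b) falls through to the generic error. Condensed to the matching rule alone (Ty is a stand-in for AsmType*):

// Sketch of the arm-matching rule for conditionals in return position.
enum class Ty { None, Signed, Double, Float, Void };

Ty CombineConditionalArms(Ty then_arm, Ty else_arm) {
  if (then_arm == Ty::None || else_arm == Ty::None) return Ty::None;  // failed
  if (then_arm == else_arm) return then_arm;  // arms must agree exactly
  return Ty::None;  // mismatched arms: fall through to the generic error
}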
// 5.4 VariableTypeAnnotations

View File

@ -82,6 +82,8 @@ class AsmTyper final {
Handle<JSMessageObject> error_message() const { return error_message_; }
const MessageLocation* message_location() const { return &message_location_; }
AsmType* TriggerParsingError();
AsmType* TypeOf(AstNode* node) const;
AsmType* TypeOf(Variable* v) const;
StandardMember VariableAsStandardMember(Variable* var);
@ -362,7 +364,7 @@ class AsmTyper final {
AsmType* ParameterTypeAnnotations(Variable* parameter,
Expression* annotation);
// 5.2 ReturnTypeAnnotations
AsmType* ReturnTypeAnnotations(ReturnStatement* statement);
AsmType* ReturnTypeAnnotations(Expression* ret_expr);
// 5.4 VariableTypeAnnotations
// 5.5 GlobalVariableTypeAnnotations
AsmType* VariableTypeAnnotations(

View File

@ -22,7 +22,9 @@
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/counters.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
namespace v8 {
@ -36,6 +38,8 @@ namespace wasm {
if (HasStackOverflow()) return; \
} while (false)
namespace {
enum AsmScope { kModuleScope, kInitScope, kFuncScope, kExportScope };
enum ValueFate { kDrop, kLeaveOnStack };
@ -45,6 +49,10 @@ struct ForeignVariable {
ValueType type;
};
enum TargetType : uint8_t { NoTarget, BreakTarget, ContinueTarget };
} // namespace
class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public:
AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, CompilationInfo* info,
@ -99,7 +107,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
foreign_init_function_->EmitGetLocal(static_cast<uint32_t>(pos));
ForeignVariable* fv = &foreign_variables_[pos];
uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
foreign_init_function_->EmitWithVarInt(kExprSetGlobal, index);
foreign_init_function_->EmitWithVarUint(kExprSetGlobal, index);
}
foreign_init_function_->Emit(kExprEnd);
}
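The EmitVarInt-to-EmitVarUint renames throughout this file track the wasm binary format: global indices, call targets, and branch depths are unsigned LEB128 quantities. For reference, a minimal unsigned-LEB128 encoder (a sketch; WasmFunctionBuilder's own emitter is not shown in this diff):

#include <cstdint>
#include <vector>

// Unsigned LEB128: 7 payload bits per byte, high bit set while more follow.
void EmitVarUintSketch(std::vector<uint8_t>* out, uint32_t value) {
  do {
    uint8_t b = value & 0x7f;
    value >>= 7;
    if (value != 0) b |= 0x80;  // continuation bit
    out->push_back(b);
  } while (value != 0);
}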
@ -142,31 +150,36 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DCHECK_EQ(kModuleScope, scope_);
DCHECK_NULL(current_function_builder_);
FunctionLiteral* old_func = decl->fun();
Zone zone(isolate_->allocator(), ZONE_NAME);
DeclarationScope* new_func_scope = nullptr;
std::unique_ptr<ParseInfo> info;
if (decl->fun()->body() == nullptr) {
// TODO(titzer/bradnelson): Reuse SharedFunctionInfos used here when
// compiling the wasm module.
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfo(decl->fun(), script_, info_);
shared->set_is_toplevel(false);
ParseInfo info(&zone, script_);
info.set_shared_info(shared);
info.set_toplevel(false);
info.set_language_mode(decl->fun()->scope()->language_mode());
info.set_allow_lazy_parsing(false);
info.set_function_literal_id(shared->function_literal_id());
info.set_ast_value_factory(ast_value_factory_);
info.set_ast_value_factory_owned(false);
info.reset(new ParseInfo(script_));
info->set_shared_info(shared);
info->set_toplevel(false);
info->set_language_mode(decl->fun()->scope()->language_mode());
info->set_allow_lazy_parsing(false);
info->set_function_literal_id(shared->function_literal_id());
info->set_ast_value_factory(ast_value_factory_);
info->set_ast_value_factory_owned(false);
// Create fresh function scope to use to parse the function in.
new_func_scope = new (info.zone()) DeclarationScope(
info.zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
info.set_asm_function_scope(new_func_scope);
if (!Compiler::ParseAndAnalyze(&info)) {
new_func_scope = new (info->zone()) DeclarationScope(
info->zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
info->set_asm_function_scope(new_func_scope);
if (!Compiler::ParseAndAnalyze(info.get())) {
decl->fun()->scope()->outer_scope()->RemoveInnerScope(new_func_scope);
if (isolate_->has_pending_exception()) {
isolate_->clear_pending_exception();
}
typer_->TriggerParsingError();
typer_failed_ = true;
return;
}
FunctionLiteral* func = info.literal();
FunctionLiteral* func = info->literal();
DCHECK_NOT_NULL(func);
decl->set_fun(func);
}
@ -226,7 +239,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
}
if (scope_ == kFuncScope) {
BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock);
BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
BreakTarget);
RECURSE(VisitStatements(stmt->statements()));
} else {
RECURSE(VisitStatements(stmt->statements()));
@ -239,10 +253,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public:
BlockVisitor(AsmWasmBuilderImpl* builder, BreakableStatement* stmt,
WasmOpcode opcode)
WasmOpcode opcode, TargetType target_type = NoTarget)
: builder_(builder) {
builder_->breakable_blocks_.push_back(
std::make_pair(stmt, opcode == kExprLoop));
builder_->breakable_blocks_.emplace_back(stmt, target_type);
// Blocks and loops have a type immediate.
builder_->current_function_builder_->EmitWithU8(opcode, kLocalVoid);
}
@ -290,9 +303,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitIfStatement(IfStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(stmt->condition()));
current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
// WASM ifs come with implement blocks for both arms.
breakable_blocks_.push_back(std::make_pair(nullptr, false));
// Wasm ifs come with implicit blocks for both arms.
BlockVisitor block(this, nullptr, kExprIf);
if (stmt->HasThenStatement()) {
RECURSE(Visit(stmt->then_statement()));
}
@ -300,18 +312,15 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->Emit(kExprElse);
RECURSE(Visit(stmt->else_statement()));
}
current_function_builder_->Emit(kExprEnd);
breakable_blocks_.pop_back();
}
void DoBreakOrContinue(BreakableStatement* target, bool is_continue) {
void DoBreakOrContinue(BreakableStatement* target, TargetType type) {
DCHECK_EQ(kFuncScope, scope_);
for (int i = static_cast<int>(breakable_blocks_.size()) - 1; i >= 0; --i) {
auto elem = breakable_blocks_.at(i);
if (elem.first == target && elem.second == is_continue) {
if (elem.first == target && elem.second == type) {
int block_distance = static_cast<int>(breakable_blocks_.size() - i - 1);
current_function_builder_->Emit(kExprBr);
current_function_builder_->EmitVarInt(block_distance);
current_function_builder_->EmitWithVarUint(kExprBr, block_distance);
return;
}
}
@ -319,11 +328,11 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
}
void VisitContinueStatement(ContinueStatement* stmt) {
DoBreakOrContinue(stmt->target(), true);
DoBreakOrContinue(stmt->target(), ContinueTarget);
}
void VisitBreakStatement(BreakStatement* stmt) {
DoBreakOrContinue(stmt->target(), false);
DoBreakOrContinue(stmt->target(), BreakTarget);
}
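Replacing the bool with TargetType lets one loop statement register separate break and continue blocks, and DoBreakOrContinue resolves a jump by scanning breakable_blocks_ from the innermost end; the emitted br depth is simply the number of blocks between the top of the stack and the match. The depth computation in isolation (a sketch over a plain vector, not the builder's zone types):

#include <utility>
#include <vector>

enum TargetType { NoTarget, BreakTarget, ContinueTarget };
using Block = std::pair<const void*, TargetType>;  // (statement, target type)

// Sketch: the br depth is the distance from the innermost block to the match.
int BrDepth(const std::vector<Block>& blocks, const void* target,
            TargetType type) {
  for (int i = static_cast<int>(blocks.size()) - 1; i >= 0; --i) {
    if (blocks[i].first == target && blocks[i].second == type) {
      return static_cast<int>(blocks.size()) - i - 1;  // block_distance
    }
  }
  return -1;  // no match: the statement is not a legal target here
}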
void VisitReturnStatement(ReturnStatement* stmt) {
@ -361,7 +370,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->Emit(kExprI32LtS);
current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
if_depth++;
breakable_blocks_.push_back(std::make_pair(nullptr, false));
breakable_blocks_.emplace_back(nullptr, NoTarget);
HandleCase(node->left, case_to_block, tag, default_block, if_depth);
current_function_builder_->Emit(kExprElse);
}
@ -371,7 +380,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->Emit(kExprI32GtS);
current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
if_depth++;
breakable_blocks_.push_back(std::make_pair(nullptr, false));
breakable_blocks_.emplace_back(nullptr, NoTarget);
HandleCase(node->right, case_to_block, tag, default_block, if_depth);
current_function_builder_->Emit(kExprElse);
}
@ -382,7 +391,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
DCHECK(case_to_block.find(node->begin) != case_to_block.end());
current_function_builder_->Emit(kExprBr);
current_function_builder_->EmitVarInt(1 + if_depth +
current_function_builder_->EmitVarUint(1 + if_depth +
case_to_block[node->begin]);
current_function_builder_->Emit(kExprEnd);
} else {
@ -394,21 +403,21 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
VisitVariableProxy(tag);
}
current_function_builder_->Emit(kExprBrTable);
current_function_builder_->EmitVarInt(node->end - node->begin + 1);
current_function_builder_->EmitVarUint(node->end - node->begin + 1);
for (int v = node->begin; v <= node->end; ++v) {
if (case_to_block.find(v) != case_to_block.end()) {
uint32_t target = if_depth + case_to_block[v];
current_function_builder_->EmitVarInt(target);
current_function_builder_->EmitVarUint(target);
} else {
uint32_t target = if_depth + default_block;
current_function_builder_->EmitVarInt(target);
current_function_builder_->EmitVarUint(target);
}
if (v == kMaxInt) {
break;
}
}
uint32_t target = if_depth + default_block;
current_function_builder_->EmitVarInt(target);
current_function_builder_->EmitVarUint(target);
}
while (if_depth-- != prev_if_depth) {
@ -425,7 +434,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (case_count == 0) {
return;
}
BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock);
BlockVisitor visitor(this, stmt->AsBreakableStatement(), kExprBlock,
BreakTarget);
ZoneVector<BlockVisitor*> blocks(zone_);
ZoneVector<int32_t> cases(zone_);
ZoneMap<int, unsigned int> case_to_block(zone_);
@ -455,7 +465,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (root->left != nullptr || root->right != nullptr ||
root->begin == root->end) {
current_function_builder_->Emit(kExprBr);
current_function_builder_->EmitVarInt(default_block);
current_function_builder_->EmitVarUint(default_block);
}
}
for (int i = 0; i < case_count; ++i) {
@ -471,26 +481,28 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitDoWhileStatement(DoWhileStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
BreakTarget);
BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
{
BlockVisitor inner_block(this, stmt->AsBreakableStatement(), kExprBlock,
ContinueTarget);
RECURSE(Visit(stmt->body()));
}
RECURSE(Visit(stmt->cond()));
current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
current_function_builder_->EmitWithU8(kExprBr, 1);
current_function_builder_->Emit(kExprEnd);
current_function_builder_->EmitWithU8(kExprBrIf, 0);
}
void VisitWhileStatement(WhileStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_);
BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
BreakTarget);
BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop,
ContinueTarget);
RECURSE(Visit(stmt->cond()));
breakable_blocks_.push_back(std::make_pair(nullptr, false));
current_function_builder_->EmitWithU8(kExprIf, kLocalVoid);
BlockVisitor if_block(this, nullptr, kExprIf);
RECURSE(Visit(stmt->body()));
current_function_builder_->EmitWithU8(kExprBr, 1);
current_function_builder_->Emit(kExprEnd);
breakable_blocks_.pop_back();
}
void VisitForStatement(ForStatement* stmt) {
@ -498,8 +510,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (stmt->init() != nullptr) {
RECURSE(Visit(stmt->init()));
}
BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock);
BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop);
BlockVisitor block(this, stmt->AsBreakableStatement(), kExprBlock,
BreakTarget);
BlockVisitor loop(this, stmt->AsBreakableStatement(), kExprLoop,
ContinueTarget);
if (stmt->cond() != nullptr) {
RECURSE(Visit(stmt->cond()));
current_function_builder_->Emit(kExprI32Eqz);
@ -557,8 +571,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitConditional(Conditional* expr) {
DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(expr->condition()));
// WASM ifs come with implicit blocks for both arms.
breakable_blocks_.push_back(std::make_pair(nullptr, false));
// Wasm ifs come with implicit blocks for both arms.
breakable_blocks_.emplace_back(nullptr, NoTarget);
ValueTypeCode type;
switch (TypeOf(expr)) {
case kWasmI32:
@ -645,7 +659,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ValueType var_type = TypeOf(expr);
DCHECK_NE(kWasmStmt, var_type);
if (var->IsContextSlot()) {
current_function_builder_->EmitWithVarInt(
current_function_builder_->EmitWithVarUint(
kExprGetGlobal, LookupOrInsertGlobal(var, var_type));
} else {
current_function_builder_->EmitGetLocal(
@ -671,35 +685,26 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (type->IsA(AsmType::Signed())) {
int32_t i = 0;
if (!value->ToInt32(&i)) {
UNREACHABLE();
}
byte code[] = {WASM_I32V(i)};
current_function_builder_->EmitCode(code, sizeof(code));
CHECK(value->ToInt32(&i));
current_function_builder_->EmitI32Const(i);
} else if (type->IsA(AsmType::Unsigned()) || type->IsA(AsmType::FixNum())) {
uint32_t u = 0;
if (!value->ToUint32(&u)) {
UNREACHABLE();
}
int32_t i = static_cast<int32_t>(u);
byte code[] = {WASM_I32V(i)};
current_function_builder_->EmitCode(code, sizeof(code));
CHECK(value->ToUint32(&u));
current_function_builder_->EmitI32Const(bit_cast<int32_t>(u));
} else if (type->IsA(AsmType::Int())) {
// The parser can collapse !0, !1 etc to true / false.
// Allow these as int literals.
if (expr->raw_value()->IsTrue()) {
byte code[] = {WASM_I32V(1)};
byte code[] = {WASM_ONE};
current_function_builder_->EmitCode(code, sizeof(code));
} else if (expr->raw_value()->IsFalse()) {
byte code[] = {WASM_I32V(0)};
byte code[] = {WASM_ZERO};
current_function_builder_->EmitCode(code, sizeof(code));
} else if (expr->raw_value()->IsNumber()) {
// This can happen when -x becomes x * -1 (due to the parser).
int32_t i = 0;
if (!value->ToInt32(&i) || i != -1) {
UNREACHABLE();
}
byte code[] = {WASM_I32V(i)};
CHECK(value->ToInt32(&i) && i == -1);
byte code[] = {WASM_I32V_1(-1)};
current_function_builder_->EmitCode(code, sizeof(code));
} else {
UNREACHABLE();
@ -949,9 +954,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DCHECK_NE(kWasmStmt, var_type);
if (var->IsContextSlot()) {
uint32_t index = LookupOrInsertGlobal(var, var_type);
current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
current_function_builder_->EmitWithVarUint(kExprSetGlobal, index);
if (fate == kLeaveOnStack) {
current_function_builder_->EmitWithVarInt(kExprGetGlobal, index);
current_function_builder_->EmitWithVarUint(kExprGetGlobal, index);
}
} else {
if (fate == kDrop) {
@ -1461,7 +1466,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
int parent_pos = returns_value ? parent_binop->position() : pos;
current_function_builder_->AddAsmWasmOffset(pos, parent_pos);
current_function_builder_->Emit(kExprCallFunction);
current_function_builder_->EmitVarInt(index);
current_function_builder_->EmitVarUint(index);
} else {
WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
VisitCallArgs(expr);
@ -1495,8 +1500,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->AddAsmWasmOffset(expr->position(),
expr->position());
current_function_builder_->Emit(kExprCallIndirect);
current_function_builder_->EmitVarInt(indices->signature_index);
current_function_builder_->EmitVarInt(0); // table index
current_function_builder_->EmitVarUint(indices->signature_index);
current_function_builder_->EmitVarUint(0); // table index
returns_value =
builder_->GetSignature(indices->signature_index)->return_count() >
0;
@ -1964,7 +1969,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
AsmTyper* typer_;
bool typer_failed_;
bool typer_finished_;
ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
ZoneVector<std::pair<BreakableStatement*, TargetType>> breakable_blocks_;
ZoneVector<ForeignVariable> foreign_variables_;
WasmFunctionBuilder* init_function_;
WasmFunctionBuilder* foreign_init_function_;
@ -1988,6 +1993,9 @@ AsmWasmBuilder::AsmWasmBuilder(CompilationInfo* info)
// TODO(aseemgarg): probably should take zone (to write wasm to) as input so
// that zone in constructor may be thrown away once wasm module is written.
AsmWasmBuilder::Result AsmWasmBuilder::Run(Handle<FixedArray>* foreign_args) {
HistogramTimerScope asm_wasm_time_scope(
info_->isolate()->counters()->asm_wasm_translation_time());
Zone* zone = info_->zone();
AsmWasmBuilderImpl impl(info_->isolate(), zone, info_,
info_->parse_info()->ast_value_factory(),

View File

@ -234,17 +234,6 @@ unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::icache_line_size_ = 0;
unsigned CpuFeatures::dcache_line_size_ = 0;
// -----------------------------------------------------------------------------
// Implementation of Label
int Label::pos() const {
if (pos_ < 0) return -pos_ - 1;
if (pos_ > 0) return pos_ - 1;
UNREACHABLE();
return 0;
}
// -----------------------------------------------------------------------------
// Implementation of RelocInfoWriter and RelocIterator
//
@ -319,25 +308,25 @@ const int kCodeWithIdTag = 0;
const int kDeoptReasonTag = 1;
void RelocInfo::update_wasm_memory_reference(
Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
if (IsWasmMemoryReference(rmode_)) {
Address updated_reference;
Address old_base, Address new_base, ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemoryReference(rmode_));
DCHECK_GE(wasm_memory_reference(), old_base);
updated_reference = new_base + (wasm_memory_reference() - old_base);
Address updated_reference = new_base + (wasm_memory_reference() - old_base);
// The reference is not checked here but at runtime. Validity of references
// may change over time.
unchecked_update_wasm_memory_reference(updated_reference,
icache_flush_mode);
} else if (IsWasmMemorySizeReference(rmode_)) {
unchecked_update_wasm_memory_reference(updated_reference, icache_flush_mode);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
}
}
void RelocInfo::update_wasm_memory_size(uint32_t old_size, uint32_t new_size,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmMemorySizeReference(rmode_));
uint32_t current_size_reference = wasm_memory_size_reference();
uint32_t updated_size_reference =
new_size + (current_size_reference - old_size);
unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
} else {
UNREACHABLE();
}
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
}
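Splitting update_wasm_memory_size out of update_wasm_memory_reference makes the two relocation rules explicit: an embedded address keeps its offset from the memory base, while an embedded size keeps its delta from the memory size. Both reduce to one addition, sketched here outside the RelocInfo machinery:

#include <cstdint>

using Address = uint8_t*;

// An embedded pointer into wasm memory preserves its offset from the base.
Address RebaseMemoryReference(Address ref, Address old_base,
                              Address new_base) {
  return new_base + (ref - old_base);
}

// An embedded memory size (e.g. a bounds-check limit) preserves its delta.
uint32_t RebaseMemorySize(uint32_t ref, uint32_t old_size, uint32_t new_size) {
  return new_size + (ref - old_size);
}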
@ -488,7 +477,8 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode)) {
RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsWasmProtectedLanding(rmode)) {
WriteIntData(static_cast<int>(rinfo->data()));
}
}
@ -637,7 +627,8 @@ void RelocIterator::next() {
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode)) {
RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsWasmProtectedLanding(rmode)) {
if (SetMode(rmode)) {
AdvanceReadInt();
return;
@ -734,8 +725,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "no reloc 64";
case EMBEDDED_OBJECT:
return "embedded object";
case DEBUGGER_STATEMENT:
return "debugger statement";
case CODE_TARGET:
return "code target";
case CODE_TARGET_WITH_ID:
@ -782,6 +771,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "wasm global value reference";
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
return "wasm function table size reference";
case WASM_PROTECTED_INSTRUCTION_LANDING:
return "wasm protected instruction landing";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@ -841,7 +832,6 @@ void RelocInfo::Verify(Isolate* isolate) {
case CELL:
Object::VerifyPointer(target_cell());
break;
case DEBUGGER_STATEMENT:
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
@ -880,6 +870,8 @@ void RelocInfo::Verify(Isolate* isolate) {
case WASM_MEMORY_SIZE_REFERENCE:
case WASM_GLOBAL_REFERENCE:
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
case WASM_PROTECTED_INSTRUCTION_LANDING:
// TODO(eholk): make sure the protected instruction is in range.
case NONE32:
case NONE64:
break;
@ -1575,8 +1567,9 @@ ExternalReference ExternalReference::is_tail_call_elimination_enabled_address(
return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
}
ExternalReference ExternalReference::promise_hook_address(Isolate* isolate) {
return ExternalReference(isolate->promise_hook_address());
ExternalReference ExternalReference::promise_hook_or_debug_is_active_address(
Isolate* isolate) {
return ExternalReference(isolate->promise_hook_or_debug_is_active_address());
}
ExternalReference ExternalReference::debug_is_active_address(
@ -1589,12 +1582,6 @@ ExternalReference ExternalReference::debug_hook_on_function_call_address(
return ExternalReference(isolate->debug()->hook_on_function_call_address());
}
ExternalReference ExternalReference::debug_after_break_target_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->after_break_target_address());
}
ExternalReference ExternalReference::runtime_function_table_address(
Isolate* isolate) {
return ExternalReference(
@ -1675,6 +1662,11 @@ ExternalReference ExternalReference::debug_suspended_generator_address(
return ExternalReference(isolate->debug()->suspended_generator_address());
}
ExternalReference ExternalReference::debug_restart_fp_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->restart_fp_address());
}
ExternalReference ExternalReference::fixed_typed_array_base_data_offset() {
return ExternalReference(reinterpret_cast<void*>(
FixedTypedArrayBase::kDataOffset - kHeapObjectTag));

View File

@ -40,6 +40,7 @@
#include "src/deoptimize-reason.h"
#include "src/globals.h"
#include "src/isolate.h"
#include "src/label.h"
#include "src/log.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
@ -272,79 +273,6 @@ class CpuFeatures : public AllStatic {
};
// -----------------------------------------------------------------------------
// Labels represent pc locations; they are typically jump or call targets.
// After declaration, a label can be freely used to denote known or (yet)
// unknown pc location. Assembler::bind() is used to bind a label to the
// current pc. A label can be bound only once.
class Label {
public:
enum Distance {
kNear, kFar
};
INLINE(Label()) {
Unuse();
UnuseNear();
}
INLINE(~Label()) {
DCHECK(!is_linked());
DCHECK(!is_near_linked());
}
INLINE(void Unuse()) { pos_ = 0; }
INLINE(void UnuseNear()) { near_link_pos_ = 0; }
INLINE(bool is_bound() const) { return pos_ < 0; }
INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
INLINE(bool is_linked() const) { return pos_ > 0; }
INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
// Returns the position of bound or linked labels. Cannot be used
// for unused labels.
int pos() const;
int near_link_pos() const { return near_link_pos_ - 1; }
private:
// pos_ encodes both the binding state (via its sign)
// and the binding position (via its value) of a label.
//
// pos_ < 0 bound label, pos() returns the jump target position
// pos_ == 0 unused label
// pos_ > 0 linked label, pos() returns the last reference position
int pos_;
// Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
int near_link_pos_;
void bind_to(int pos) {
pos_ = -pos - 1;
DCHECK(is_bound());
}
void link_to(int pos, Distance distance = kFar) {
if (distance == kNear) {
near_link_pos_ = pos + 1;
DCHECK(is_near_linked());
} else {
pos_ = pos + 1;
DCHECK(is_linked());
}
}
friend class Assembler;
friend class Displacement;
friend class RegExpMacroAssemblerIrregexp;
#if V8_TARGET_ARCH_ARM64
// On ARM64, the Assembler keeps track of pointers to Labels to resolve
// branches to distant targets. Copying labels would confuse the Assembler.
DISALLOW_COPY_AND_ASSIGN(Label); // NOLINT
#endif
};
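With the class moved to the new src/label.h (included above), the pos_ encoding it documents is easy to miss: zero means unused, pos + 1 means linked, and -pos - 1 means bound, so the sign alone distinguishes the three states and pos() recovers the position from either non-zero form. A round-trip sketch of the encoding:

#include <cassert>

// pos_ encoding: 0 = unused, pos + 1 = linked, -pos - 1 = bound.
int EncodeBound(int pos) { return -pos - 1; }
int EncodeLinked(int pos) { return pos + 1; }

int DecodePos(int encoded) {
  assert(encoded != 0 && "an unused label has no position");
  return encoded < 0 ? -encoded - 1 : encoded - 1;
}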
enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };
enum ArgvMode { kArgvOnStack, kArgvInRegister };
@ -389,13 +317,13 @@ class RelocInfo {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
DEBUGGER_STATEMENT, // Code target for the debugger statement.
EMBEDDED_OBJECT,
// To relocate pointers into the wasm memory embedded in wasm code
WASM_MEMORY_REFERENCE,
WASM_GLOBAL_REFERENCE,
WASM_MEMORY_SIZE_REFERENCE,
WASM_FUNCTION_TABLE_SIZE_REFERENCE,
WASM_PROTECTED_INSTRUCTION_LANDING,
CELL,
// Everything after runtime_entry (inclusive) is not GC'ed.
@ -437,7 +365,7 @@ class RelocInfo {
FIRST_REAL_RELOC_MODE = CODE_TARGET,
LAST_REAL_RELOC_MODE = VENEER_POOL,
LAST_CODE_ENUM = DEBUGGER_STATEMENT,
LAST_CODE_ENUM = CODE_TARGET_WITH_ID,
LAST_GCED_ENUM = WASM_FUNCTION_TABLE_SIZE_REFERENCE,
FIRST_SHAREABLE_RELOC_MODE = CELL,
};
@ -513,9 +441,6 @@ class RelocInfo {
static inline bool IsDebugBreakSlotAtTailCall(Mode mode) {
return mode == DEBUG_BREAK_SLOT_AT_TAIL_CALL;
}
static inline bool IsDebuggerStatement(Mode mode) {
return mode == DEBUGGER_STATEMENT;
}
static inline bool IsNone(Mode mode) {
return mode == NONE32 || mode == NONE64;
}
@ -546,6 +471,9 @@ class RelocInfo {
static inline bool IsWasmPtrReference(Mode mode) {
return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE;
}
static inline bool IsWasmProtectedLanding(Mode mode) {
return mode == WASM_PROTECTED_INSTRUCTION_LANDING;
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
@ -578,7 +506,10 @@ class RelocInfo {
uint32_t wasm_function_table_size_reference();
uint32_t wasm_memory_size_reference();
void update_wasm_memory_reference(
Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_memory_size(
uint32_t old_size, uint32_t new_size,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_global_reference(
Address old_base, Address new_base,
@ -1069,7 +1000,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference invoke_function_callback(Isolate* isolate);
static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
static ExternalReference promise_hook_address(Isolate* isolate);
static ExternalReference promise_hook_or_debug_is_active_address(
Isolate* isolate);
V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
Isolate* isolate);
@ -1082,6 +1014,9 @@ class ExternalReference BASE_EMBEDDED {
// Used to check for suspended generator, used for stepping across await call.
static ExternalReference debug_suspended_generator_address(Isolate* isolate);
// Used to store the frame pointer to drop to when restarting a frame.
static ExternalReference debug_restart_fp_address(Isolate* isolate);
#ifndef V8_INTERPRETED_REGEXP
// C functions called from RegExp generated code.

View File

@ -6,7 +6,6 @@
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
#include "src/debug/debug.h"
#include "src/isolate.h"
#include "src/utils.h"

View File

@ -5,5 +5,6 @@ bmeurer@chromium.org
littledan@chromium.org
marja@chromium.org
mstarzinger@chromium.org
neis@chromium.org
rossberg@chromium.org
verwaest@chromium.org

View File

@ -1,322 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ast/ast-literal-reindexer.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
void AstLiteralReindexer::VisitVariableDeclaration(VariableDeclaration* node) {
VisitVariableProxy(node->proxy());
}
void AstLiteralReindexer::VisitEmptyStatement(EmptyStatement* node) {}
void AstLiteralReindexer::VisitSloppyBlockFunctionStatement(
SloppyBlockFunctionStatement* node) {
Visit(node->statement());
}
void AstLiteralReindexer::VisitContinueStatement(ContinueStatement* node) {}
void AstLiteralReindexer::VisitBreakStatement(BreakStatement* node) {}
void AstLiteralReindexer::VisitDebuggerStatement(DebuggerStatement* node) {}
void AstLiteralReindexer::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {}
void AstLiteralReindexer::VisitDoExpression(DoExpression* node) {
Visit(node->block());
Visit(node->result());
}
void AstLiteralReindexer::VisitLiteral(Literal* node) {}
void AstLiteralReindexer::VisitRegExpLiteral(RegExpLiteral* node) {
UpdateIndex(node);
}
void AstLiteralReindexer::VisitVariableProxy(VariableProxy* node) {}
void AstLiteralReindexer::VisitThisFunction(ThisFunction* node) {}
void AstLiteralReindexer::VisitSuperPropertyReference(
SuperPropertyReference* node) {
Visit(node->this_var());
Visit(node->home_object());
}
void AstLiteralReindexer::VisitSuperCallReference(SuperCallReference* node) {
Visit(node->this_var());
Visit(node->new_target_var());
Visit(node->this_function_var());
}
void AstLiteralReindexer::VisitRewritableExpression(
RewritableExpression* node) {
Visit(node->expression());
}
void AstLiteralReindexer::VisitExpressionStatement(ExpressionStatement* node) {
Visit(node->expression());
}
void AstLiteralReindexer::VisitReturnStatement(ReturnStatement* node) {
Visit(node->expression());
}
void AstLiteralReindexer::VisitYield(Yield* node) {
Visit(node->generator_object());
Visit(node->expression());
}
void AstLiteralReindexer::VisitThrow(Throw* node) { Visit(node->exception()); }
void AstLiteralReindexer::VisitUnaryOperation(UnaryOperation* node) {
Visit(node->expression());
}
void AstLiteralReindexer::VisitCountOperation(CountOperation* node) {
Visit(node->expression());
}
void AstLiteralReindexer::VisitBlock(Block* node) {
VisitStatements(node->statements());
}
void AstLiteralReindexer::VisitFunctionDeclaration(FunctionDeclaration* node) {
VisitVariableProxy(node->proxy());
VisitFunctionLiteral(node->fun());
}
void AstLiteralReindexer::VisitCallRuntime(CallRuntime* node) {
VisitArguments(node->arguments());
}
void AstLiteralReindexer::VisitWithStatement(WithStatement* node) {
Visit(node->expression());
Visit(node->statement());
}
void AstLiteralReindexer::VisitDoWhileStatement(DoWhileStatement* node) {
Visit(node->body());
Visit(node->cond());
}
void AstLiteralReindexer::VisitWhileStatement(WhileStatement* node) {
Visit(node->cond());
Visit(node->body());
}
void AstLiteralReindexer::VisitTryCatchStatement(TryCatchStatement* node) {
Visit(node->try_block());
Visit(node->catch_block());
}
void AstLiteralReindexer::VisitTryFinallyStatement(TryFinallyStatement* node) {
Visit(node->try_block());
Visit(node->finally_block());
}
void AstLiteralReindexer::VisitProperty(Property* node) {
Visit(node->key());
Visit(node->obj());
}
void AstLiteralReindexer::VisitAssignment(Assignment* node) {
Visit(node->target());
Visit(node->value());
}
void AstLiteralReindexer::VisitBinaryOperation(BinaryOperation* node) {
Visit(node->left());
Visit(node->right());
}
void AstLiteralReindexer::VisitCompareOperation(CompareOperation* node) {
Visit(node->left());
Visit(node->right());
}
void AstLiteralReindexer::VisitSpread(Spread* node) {
// This is reachable because ParserBase::ParseArrowFunctionLiteral calls
// ReindexLiterals before calling RewriteDestructuringAssignments.
Visit(node->expression());
}
void AstLiteralReindexer::VisitEmptyParentheses(EmptyParentheses* node) {}
void AstLiteralReindexer::VisitGetIterator(GetIterator* node) {
Visit(node->iterable());
}
void AstLiteralReindexer::VisitForInStatement(ForInStatement* node) {
Visit(node->each());
Visit(node->enumerable());
Visit(node->body());
}
void AstLiteralReindexer::VisitForOfStatement(ForOfStatement* node) {
Visit(node->assign_iterator());
Visit(node->next_result());
Visit(node->result_done());
Visit(node->assign_each());
Visit(node->body());
}
void AstLiteralReindexer::VisitConditional(Conditional* node) {
Visit(node->condition());
Visit(node->then_expression());
Visit(node->else_expression());
}
void AstLiteralReindexer::VisitIfStatement(IfStatement* node) {
Visit(node->condition());
Visit(node->then_statement());
if (node->HasElseStatement()) {
Visit(node->else_statement());
}
}
void AstLiteralReindexer::VisitSwitchStatement(SwitchStatement* node) {
Visit(node->tag());
ZoneList<CaseClause*>* cases = node->cases();
for (int i = 0; i < cases->length(); i++) {
VisitCaseClause(cases->at(i));
}
}
void AstLiteralReindexer::VisitCaseClause(CaseClause* node) {
if (!node->is_default()) Visit(node->label());
VisitStatements(node->statements());
}
void AstLiteralReindexer::VisitForStatement(ForStatement* node) {
if (node->init() != NULL) Visit(node->init());
if (node->cond() != NULL) Visit(node->cond());
if (node->next() != NULL) Visit(node->next());
Visit(node->body());
}
void AstLiteralReindexer::VisitClassLiteral(ClassLiteral* node) {
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
if (node->class_variable_proxy()) {
VisitVariableProxy(node->class_variable_proxy());
}
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
}
void AstLiteralReindexer::VisitObjectLiteral(ObjectLiteral* node) {
UpdateIndex(node);
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
}
void AstLiteralReindexer::VisitLiteralProperty(LiteralProperty* node) {
Visit(node->key());
Visit(node->value());
}
void AstLiteralReindexer::VisitArrayLiteral(ArrayLiteral* node) {
UpdateIndex(node);
for (int i = 0; i < node->values()->length(); i++) {
Visit(node->values()->at(i));
}
}
void AstLiteralReindexer::VisitCall(Call* node) {
Visit(node->expression());
VisitArguments(node->arguments());
}
void AstLiteralReindexer::VisitCallNew(CallNew* node) {
Visit(node->expression());
VisitArguments(node->arguments());
}
void AstLiteralReindexer::VisitStatements(ZoneList<Statement*>* statements) {
if (statements == NULL) return;
for (int i = 0; i < statements->length(); i++) {
Visit(statements->at(i));
}
}
void AstLiteralReindexer::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
for (int i = 0; i < declarations->length(); i++) {
Visit(declarations->at(i));
}
}
void AstLiteralReindexer::VisitArguments(ZoneList<Expression*>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
Visit(arguments->at(i));
}
}
void AstLiteralReindexer::VisitFunctionLiteral(FunctionLiteral* node) {
  // We don't recurse into the declarations or body of the function literal:
  // each FunctionLiteral is reindexed separately when it is compiled.
}
void AstLiteralReindexer::Reindex(Expression* pattern) { Visit(pattern); }
} // namespace internal
} // namespace v8

View File

@ -1,43 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_AST_LITERAL_REINDEXER
#define V8_AST_AST_LITERAL_REINDEXER
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
class AstLiteralReindexer final : public AstVisitor<AstLiteralReindexer> {
public:
AstLiteralReindexer() : next_index_(0) {}
int count() const { return next_index_; }
void Reindex(Expression* pattern);
private:
#define DEFINE_VISIT(type) void Visit##type(type* node);
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
void VisitStatements(ZoneList<Statement*>* statements);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void VisitArguments(ZoneList<Expression*>* arguments);
void VisitLiteralProperty(LiteralProperty* property);
void UpdateIndex(MaterializedLiteral* literal) {
literal->literal_index_ = next_index_++;
}
int next_index_;
DEFINE_AST_VISITOR_MEMBERS_WITHOUT_STACKOVERFLOW()
DISALLOW_COPY_AND_ASSIGN(AstLiteralReindexer);
};
} // namespace internal
} // namespace v8
#endif // V8_AST_AST_LITERAL_REINDEXER
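The DEFINE_VISIT/AST_NODE_LIST pairing in the deleted header above is a classic X-macro: the node list is written once and re-expanded to stamp out one Visit declaration per AST node type. A minimal self-contained analogue, with a two-entry hypothetical NODE_LIST standing in for V8's real AST_NODE_LIST:

#include <iostream>

// Hypothetical node list; V8's AST_NODE_LIST enumerates the real AST nodes.
#define NODE_LIST(V) \
  V(Block)           \
  V(Literal)

struct Block {};
struct Literal {};

class Visitor {
 public:
#define DEFINE_VISIT(type) void Visit##type(type* node);
  NODE_LIST(DEFINE_VISIT)  // declares VisitBlock and VisitLiteral
#undef DEFINE_VISIT
};

void Visitor::VisitBlock(Block*) { std::cout << "Block\n"; }
void Visitor::VisitLiteral(Literal*) { std::cout << "Literal\n"; }

int main() {
  Visitor v;
  Block b;
  v.VisitBlock(&b);
}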

View File

@ -21,6 +21,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
next_id_(BailoutId::FirstUsable().ToInt()),
yield_count_(0),
properties_(zone),
language_mode_(SLOPPY),
slot_cache_(zone),
disable_crankshaft_reason_(kNoReason),
dont_optimize_reason_(kNoReason),
@ -36,10 +37,12 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
void VisitVariableProxy(VariableProxy* node, TypeofMode typeof_mode);
void VisitVariableProxyReference(VariableProxy* node);
void VisitPropertyReference(Property* node);
void VisitReference(Expression* expr);
void VisitStatementsAndDeclarations(Block* node);
void VisitStatements(ZoneList<Statement*>* statements);
void VisitDeclarations(Declaration::List* declarations);
void VisitArguments(ZoneList<Expression*>* arguments);
@ -66,9 +69,23 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
node->AssignFeedbackVectorSlots(properties_.get_spec(), &slot_cache_);
node->AssignFeedbackSlots(properties_.get_spec(), language_mode_,
&slot_cache_);
}
class LanguageModeScope {
public:
LanguageModeScope(AstNumberingVisitor* visitor, LanguageMode language_mode)
: visitor_(visitor), outer_language_mode_(visitor->language_mode_) {
visitor_->language_mode_ = language_mode;
}
~LanguageModeScope() { visitor_->language_mode_ = outer_language_mode_; }
private:
AstNumberingVisitor* visitor_;
LanguageMode outer_language_mode_;
};
BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
Zone* zone() const { return zone_; }
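LanguageModeScope, added above, is a small RAII guard: the constructor records the visitor's current language mode before installing the new one, and the destructor restores the outer mode, so nested sloppy/strict regions unwind correctly on every exit path. A self-contained sketch of the same pattern (the Visitor struct here is illustrative, not V8's class):

#include <cassert>

enum LanguageMode { SLOPPY, STRICT };

struct Visitor {
  LanguageMode language_mode_ = SLOPPY;
};

// RAII guard: save the outer mode, install the new one, restore on scope exit.
class LanguageModeScope {
 public:
  LanguageModeScope(Visitor* visitor, LanguageMode mode)
      : visitor_(visitor), outer_(visitor->language_mode_) {
    visitor_->language_mode_ = mode;
  }
  ~LanguageModeScope() { visitor_->language_mode_ = outer_; }

 private:
  Visitor* visitor_;
  LanguageMode outer_;
};

int main() {
  Visitor v;
  {
    LanguageModeScope scope(&v, STRICT);  // e.g. entering a strict function
    assert(v.language_mode_ == STRICT);
  }  // destructor restores the outer mode here
  assert(v.language_mode_ == SLOPPY);
}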
@ -78,8 +95,9 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
int next_id_;
int yield_count_;
AstProperties properties_;
// The slot cache allows us to reuse certain feedback vector slots.
FeedbackVectorSlotCache slot_cache_;
LanguageMode language_mode_;
// The slot cache allows us to reuse certain feedback slots.
FeedbackSlotCache slot_cache_;
BailoutReason disable_crankshaft_reason_;
BailoutReason dont_optimize_reason_;
HandlerTable::CatchPrediction catch_prediction_;
@ -119,8 +137,7 @@ void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
IncrementNodeCount();
DisableOptimization(kDebuggerStatement);
node->set_base_id(ReserveIdRange(DebuggerStatement::num_ids()));
DisableFullCodegenAndCrankshaft(kDebuggerStatement);
}
@ -150,6 +167,7 @@ void AstNumberingVisitor::VisitLiteral(Literal* node) {
void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(RegExpLiteral::num_ids()));
ReserveFeedbackSlots(node);
}
@ -169,10 +187,14 @@ void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node,
TypeofMode typeof_mode) {
VisitVariableProxyReference(node);
node->AssignFeedbackSlots(properties_.get_spec(), typeof_mode, &slot_cache_);
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
VisitVariableProxyReference(node);
ReserveFeedbackSlots(node);
VisitVariableProxy(node, NOT_INSIDE_TYPEOF);
}
@ -237,7 +259,12 @@ void AstNumberingVisitor::VisitThrow(Throw* node) {
void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(UnaryOperation::num_ids()));
if ((node->op() == Token::TYPEOF) && node->expression()->IsVariableProxy()) {
VariableProxy* proxy = node->expression()->AsVariableProxy();
VisitVariableProxy(proxy, INSIDE_TYPEOF);
} else {
Visit(node->expression());
}
}
@ -252,10 +279,21 @@ void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
void AstNumberingVisitor::VisitBlock(Block* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(Block::num_ids()));
if (node->scope() != NULL) VisitDeclarations(node->scope()->declarations());
VisitStatements(node->statements());
Scope* scope = node->scope();
if (scope != nullptr) {
LanguageModeScope language_mode_scope(this, scope->language_mode());
VisitStatementsAndDeclarations(node);
} else {
VisitStatementsAndDeclarations(node);
}
}
void AstNumberingVisitor::VisitStatementsAndDeclarations(Block* node) {
Scope* scope = node->scope();
DCHECK(scope == nullptr || !scope->HasBeenRemoved());
if (scope) VisitDeclarations(scope->declarations());
VisitStatements(node->statements());
}
void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
IncrementNodeCount();
@ -323,6 +361,7 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
IncrementNodeCount();
DisableFullCodegenAndCrankshaft(kTryCatchStatement);
{
@ -406,8 +445,8 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
void AstNumberingVisitor::VisitSpread(Spread* node) {
IncrementNodeCount();
// We can only get here from super calls currently.
DisableFullCodegenAndCrankshaft(kSuperReference);
// We can only get here from spread calls currently.
DisableFullCodegenAndCrankshaft(kSpreadCall);
node->set_base_id(ReserveIdRange(Spread::num_ids()));
Visit(node->expression());
}
@ -595,12 +634,19 @@ void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
IncrementNodeCount();
node->set_base_id(ReserveIdRange(FunctionLiteral::num_ids()));
if (eager_literals_ && node->ShouldEagerCompile()) {
if (node->ShouldEagerCompile()) {
if (eager_literals_) {
eager_literals_->Add(new (zone())
ThreadedListZoneEntry<FunctionLiteral*>(node));
}
// We don't recurse into the declarations or body of the function literal:
// you have to separately Renumber() each FunctionLiteral that you compile.
// If the function literal is being eagerly compiled, recurse into the
// declarations and body of the function literal.
if (!AstNumbering::Renumber(stack_limit_, zone_, node, eager_literals_)) {
SetStackOverflow();
return;
}
}
ReserveFeedbackSlots(node);
}
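The rewritten VisitFunctionLiteral changes when numbering recurses: the visitor used to stop at every inner function literal, but it now renumbers eagerly compiled inner literals immediately, bailing out through SetStackOverflow() if the recursion runs too deep. A compilable sketch of that recurse-with-guard shape, with a plain depth counter standing in for V8's real stack-limit check:

#include <vector>

struct Node {
  bool eager = false;
  std::vector<Node*> children;
};

constexpr int kMaxDepth = 1000;  // stands in for the isolate's stack limit

// Recurse only into eagerly compiled children, and report failure instead of
// overflowing the stack, mirroring the SetStackOverflow() bail-out above.
bool Renumber(Node* node, int depth = 0) {
  if (depth > kMaxDepth) return false;
  for (Node* child : node->children) {
    if (child->eager && !Renumber(child, depth + 1)) return false;
  }
  return true;
}

int main() {
  Node root, inner;
  inner.eager = true;
  root.children.push_back(&inner);
  return Renumber(&root) ? 0 : 1;
}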
@ -615,6 +661,8 @@ void AstNumberingVisitor::VisitRewritableExpression(
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DeclarationScope* scope = node->scope();
DCHECK(!scope->HasBeenRemoved());
if (scope->new_target_var() != nullptr ||
scope->this_function_var() != nullptr) {
DisableFullCodegenAndCrankshaft(kSuperReference);
@ -637,6 +685,8 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DisableFullCodegenAndCrankshaft(kClassConstructorFunction);
}
LanguageModeScope language_mode_scope(this, node->language_mode());
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
@ -646,6 +696,13 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
if (FLAG_trace_opt) {
if (disable_crankshaft_reason_ != kNoReason) {
// TODO(leszeks): This is a quick'n'dirty fix to allow the debug name of
// the function to be accessed in the below print. This DCHECK will fail
// if we move ast numbering off the main thread, but that won't be before
// we remove FCG, in which case this entire check isn't necessary anyway.
AllowHandleDereference allow_deref;
DCHECK(!node->debug_name().is_null());
PrintF("[enforcing Ignition and TurboFan for %s because: %s\n",
node->debug_name()->ToCString().get(),
GetBailoutReason(disable_crankshaft_reason_));

View File

@ -157,6 +157,8 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case ONE_BYTE_STRING_TYPE:
case CONS_STRING_TYPE:
case CONS_ONE_BYTE_STRING_TYPE:
case THIN_STRING_TYPE:
case THIN_ONE_BYTE_STRING_TYPE:
case SLICED_STRING_TYPE:
case SLICED_ONE_BYTE_STRING_TYPE:
case EXTERNAL_STRING_TYPE:
@ -193,8 +195,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
}
case HEAP_NUMBER_TYPE:
return kNumber & kTaggedPointer;
case SIMD128_VALUE_TYPE:
return kSimd;
case JS_OBJECT_TYPE:
case JS_ARGUMENTS_TYPE:
case JS_ERROR_TYPE:
@ -220,6 +220,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case JS_SET_ITERATOR_TYPE:
case JS_MAP_ITERATOR_TYPE:
case JS_STRING_ITERATOR_TYPE:
case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
@ -308,7 +309,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
case ALLOCATION_MEMENTO_TYPE:
case TYPE_FEEDBACK_INFO_TYPE:
case ALIASED_ARGUMENTS_ENTRY_TYPE:
case BOX_TYPE:
case DEBUG_INFO_TYPE:
case BREAK_POINT_INFO_TYPE:
case CELL_TYPE:
@ -1296,13 +1296,6 @@ AstBitsetType::bitset AstBitsetType::UnsignedSmall() {
return i::SmiValuesAre31Bits() ? kUnsigned30 : kUnsigned31;
}
#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
AstType* AstType::Name(Isolate* isolate, Zone* zone) { \
return Class(i::handle(isolate->heap()->name##_map()), zone); \
}
SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
#undef CONSTRUCT_SIMD_TYPE
// -----------------------------------------------------------------------------
// Instantiations.

View File

@ -156,15 +156,15 @@ namespace internal {
#define AST_REPRESENTATION(k) ((k) & AstBitsetType::kRepresentation)
#define AST_SEMANTIC(k) ((k) & AstBitsetType::kSemantic)
// Bits 21-22 are available.
#define AST_REPRESENTATION_BITSET_TYPE_LIST(V) \
V(None, 0) \
V(UntaggedBit, 1u << 22 | kSemantic) \
V(UntaggedIntegral8, 1u << 23 | kSemantic) \
V(UntaggedIntegral16, 1u << 24 | kSemantic) \
V(UntaggedIntegral32, 1u << 25 | kSemantic) \
V(UntaggedFloat32, 1u << 26 | kSemantic) \
V(UntaggedFloat64, 1u << 27 | kSemantic) \
V(UntaggedSimd128, 1u << 28 | kSemantic) \
V(UntaggedBit, 1u << 23 | kSemantic) \
V(UntaggedIntegral8, 1u << 24 | kSemantic) \
V(UntaggedIntegral16, 1u << 25 | kSemantic) \
V(UntaggedIntegral32, 1u << 26 | kSemantic) \
V(UntaggedFloat32, 1u << 27 | kSemantic) \
V(UntaggedFloat64, 1u << 28 | kSemantic) \
V(UntaggedPointer, 1u << 29 | kSemantic) \
V(TaggedSigned, 1u << 30 | kSemantic) \
V(TaggedPointer, 1u << 31 | kSemantic) \
@ -197,13 +197,12 @@ namespace internal {
V(Symbol, 1u << 12 | AST_REPRESENTATION(kTaggedPointer)) \
V(InternalizedString, 1u << 13 | AST_REPRESENTATION(kTaggedPointer)) \
V(OtherString, 1u << 14 | AST_REPRESENTATION(kTaggedPointer)) \
V(Simd, 1u << 15 | AST_REPRESENTATION(kTaggedPointer)) \
V(OtherObject, 1u << 17 | AST_REPRESENTATION(kTaggedPointer)) \
V(OtherObject, 1u << 15 | AST_REPRESENTATION(kTaggedPointer)) \
V(OtherUndetectable, 1u << 16 | AST_REPRESENTATION(kTaggedPointer)) \
V(Proxy, 1u << 18 | AST_REPRESENTATION(kTaggedPointer)) \
V(Function, 1u << 19 | AST_REPRESENTATION(kTaggedPointer)) \
V(Hole, 1u << 20 | AST_REPRESENTATION(kTaggedPointer)) \
V(OtherInternal, 1u << 21 | \
V(Proxy, 1u << 17 | AST_REPRESENTATION(kTaggedPointer)) \
V(Function, 1u << 18 | AST_REPRESENTATION(kTaggedPointer)) \
V(Hole, 1u << 19 | AST_REPRESENTATION(kTaggedPointer)) \
V(OtherInternal, 1u << 20 | \
AST_REPRESENTATION(kTagged | kUntagged)) \
\
V(Signed31, kUnsigned30 | kNegative31) \
@ -232,11 +231,10 @@ namespace internal {
V(NullOrUndefined, kNull | kUndefined) \
V(Undetectable, kNullOrUndefined | kOtherUndetectable) \
V(NumberOrOddball, kNumber | kNullOrUndefined | kBoolean | kHole) \
V(NumberOrSimdOrString, kNumber | kSimd | kString) \
V(NumberOrString, kNumber | kString) \
V(NumberOrUndefined, kNumber | kUndefined) \
V(PlainPrimitive, kNumberOrString | kBoolean | kNullOrUndefined) \
V(Primitive, kSymbol | kSimd | kPlainPrimitive) \
V(Primitive, kSymbol | kPlainPrimitive) \
V(DetectableReceiver, kFunction | kOtherObject | kProxy) \
V(Object, kFunction | kOtherObject | kOtherUndetectable) \
V(Receiver, kObject | kProxy) \
@ -770,11 +768,6 @@ class AstType {
return tuple;
}
#define CONSTRUCT_SIMD_TYPE(NAME, Name, name, lane_count, lane_type) \
static AstType* Name(Isolate* isolate, Zone* zone);
SIMD128_TYPES(CONSTRUCT_SIMD_TYPE)
#undef CONSTRUCT_SIMD_TYPE
static AstType* Union(AstType* type1, AstType* type2, Zone* zone);
static AstType* Intersect(AstType* type1, AstType* type2, Zone* zone);

View File

@ -129,6 +129,36 @@ bool AstRawString::IsOneByteEqualTo(const char* data) const {
return false;
}
bool AstRawString::Compare(void* a, void* b) {
const AstRawString* lhs = static_cast<AstRawString*>(a);
const AstRawString* rhs = static_cast<AstRawString*>(b);
DCHECK_EQ(lhs->hash(), rhs->hash());
if (lhs->length() != rhs->length()) return false;
const unsigned char* l = lhs->raw_data();
const unsigned char* r = rhs->raw_data();
size_t length = rhs->length();
if (lhs->is_one_byte()) {
if (rhs->is_one_byte()) {
return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
reinterpret_cast<const uint8_t*>(r),
length) == 0;
} else {
return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
reinterpret_cast<const uint16_t*>(r),
length) == 0;
}
} else {
if (rhs->is_one_byte()) {
return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
reinterpret_cast<const uint8_t*>(r),
length) == 0;
} else {
return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
reinterpret_cast<const uint16_t*>(r),
length) == 0;
}
}
}
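AstRawString::Compare dispatches on the encoding of each operand because a one-byte (Latin-1) string and a two-byte (UTF-16) string can still be equal character by character; comparing raw bytes would report spurious mismatches. A self-contained analogue of the mixed-width comparison, with a small template standing in for V8's CompareCharsUnsigned:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Element-wise comparison across possibly different character widths.
template <typename L, typename R>
bool CharsEqual(const L* l, const R* r, size_t length) {
  for (size_t i = 0; i < length; i++) {
    if (l[i] != r[i]) return false;
  }
  return true;
}

int main() {
  const uint8_t one_byte[] = {'a', 'b', 'c'};
  const uint16_t two_byte[] = {'a', 'b', 'c'};
  // Equal as character sequences even though their byte patterns differ.
  std::printf("%d\n", CharsEqual(one_byte, two_byte, 3));  // prints 1
}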
void AstConsString::Internalize(Isolate* isolate) {
// AstRawStrings are internalized before AstConsStrings so left and right are
@ -184,14 +214,10 @@ void AstValue::Internalize(Isolate* isolate) {
DCHECK(!string_->string().is_null());
break;
case SYMBOL:
if (symbol_name_[0] == 'i') {
DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
set_value(isolate->factory()->iterator_symbol());
} else if (strcmp(symbol_name_, "hasInstance_symbol") == 0) {
set_value(isolate->factory()->has_instance_symbol());
} else {
DCHECK_EQ(0, strcmp(symbol_name_, "home_object_symbol"));
switch (symbol_) {
case AstSymbol::kHomeObjectSymbol:
set_value(isolate->factory()->home_object_symbol());
break;
}
break;
case NUMBER_WITH_DOT:
@ -295,9 +321,8 @@ const AstValue* AstValueFactory::NewString(const AstRawString* string) {
return AddValue(value);
}
const AstValue* AstValueFactory::NewSymbol(const char* name) {
AstValue* value = new (zone_) AstValue(name);
const AstValue* AstValueFactory::NewSymbol(AstSymbol symbol) {
AstValue* value = new (zone_) AstValue(symbol);
return AddValue(value);
}
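NewSymbol now takes a typed AstSymbol enum instead of a C-string name, so the old strcmp chain in AstValue::Internalize becomes a switch that compilers can warn on when a case is missing: forgetting a new symbol shows up at build time rather than as a DCHECK failure at run time. A minimal sketch of that trade (the Lookup helper is hypothetical):

#include <cstdio>

enum class AstSymbol : unsigned char { kHomeObjectSymbol };

// With warnings enabled, a missing case here is a compile-time diagnostic,
// unlike the strcmp() chain it replaces.
const char* Lookup(AstSymbol symbol) {
  switch (symbol) {
    case AstSymbol::kHomeObjectSymbol:
      return "home_object_symbol";
  }
  return nullptr;
}

int main() { std::printf("%s\n", Lookup(AstSymbol::kHomeObjectSymbol)); }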
@ -356,7 +381,7 @@ AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
// return this AstRawString.
AstRawString key(is_one_byte, literal_bytes, hash);
base::HashMap::Entry* entry = string_table_.LookupOrInsert(&key, hash);
if (entry->value == NULL) {
if (entry->value == nullptr) {
// Copy literal contents for later comparison.
int length = literal_bytes.length();
byte* new_literal_bytes = zone_->NewArray<byte>(length);
@ -371,36 +396,5 @@ AstRawString* AstValueFactory::GetString(uint32_t hash, bool is_one_byte,
return reinterpret_cast<AstRawString*>(entry->key);
}
bool AstValueFactory::AstRawStringCompare(void* a, void* b) {
const AstRawString* lhs = static_cast<AstRawString*>(a);
const AstRawString* rhs = static_cast<AstRawString*>(b);
DCHECK_EQ(lhs->hash(), rhs->hash());
if (lhs->length() != rhs->length()) return false;
const unsigned char* l = lhs->raw_data();
const unsigned char* r = rhs->raw_data();
size_t length = rhs->length();
if (lhs->is_one_byte()) {
if (rhs->is_one_byte()) {
return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
reinterpret_cast<const uint8_t*>(r),
length) == 0;
} else {
return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(l),
reinterpret_cast<const uint16_t*>(r),
length) == 0;
}
} else {
if (rhs->is_one_byte()) {
return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
reinterpret_cast<const uint8_t*>(r),
length) == 0;
} else {
return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(l),
reinterpret_cast<const uint16_t*>(r),
length) == 0;
}
}
}
} // namespace internal
} // namespace v8

View File

@ -28,10 +28,11 @@
#ifndef V8_AST_AST_VALUE_FACTORY_H_
#define V8_AST_AST_VALUE_FACTORY_H_
#include "src/api.h"
#include "src/base/hashmap.h"
#include "src/conversions.h"
#include "src/factory.h"
#include "src/globals.h"
#include "src/isolate.h"
#include "src/utils.h"
// AstString, AstValue and AstValueFactory are for storing strings and values
@ -105,6 +106,8 @@ class AstRawString final : public AstString {
return *c;
}
static bool Compare(void* a, void* b);
// For storing AstRawStrings in a hash map.
uint32_t hash() const {
return hash_;
@ -151,15 +154,18 @@ class AstConsString final : public AstString {
const AstString* right_;
};
enum class AstSymbol : uint8_t { kHomeObjectSymbol };
// AstValue is either a string, a number, a string array, a boolean, or a
// special value (null, undefined, the hole).
// AstValue is either a string, a symbol, a number, a string array, a boolean,
// or a special value (null, undefined, the hole).
class AstValue : public ZoneObject {
public:
bool IsString() const {
return type_ == STRING;
}
bool IsSymbol() const { return type_ == SYMBOL; }
bool IsNumber() const { return IsSmi() || IsHeapNumber(); }
bool ContainsDot() const {
@ -171,6 +177,11 @@ class AstValue : public ZoneObject {
return string_;
}
AstSymbol AsSymbol() const {
CHECK_EQ(SYMBOL, type_);
return symbol_;
}
double AsNumber() const {
if (IsHeapNumber()) return number_;
if (IsSmi()) return smi_;
@ -248,8 +259,8 @@ class AstValue : public ZoneObject {
string_ = s;
}
explicit AstValue(const char* name) : type_(SYMBOL), next_(nullptr) {
symbol_name_ = name;
explicit AstValue(AstSymbol symbol) : type_(SYMBOL), next_(nullptr) {
symbol_ = symbol;
}
explicit AstValue(double n, bool with_dot) : next_(nullptr) {
@ -289,7 +300,7 @@ class AstValue : public ZoneObject {
double number_;
int smi_;
bool bool_;
const char* symbol_name_;
AstSymbol symbol_;
};
};
@ -335,7 +346,9 @@ class AstValue : public ZoneObject {
class AstStringConstants final {
public:
AstStringConstants(Isolate* isolate, uint32_t hash_seed)
: zone_(isolate->allocator(), ZONE_NAME), hash_seed_(hash_seed) {
: zone_(isolate->allocator(), ZONE_NAME),
string_table_(AstRawString::Compare),
hash_seed_(hash_seed) {
DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
#define F(name, str) \
{ \
@ -348,20 +361,28 @@ class AstStringConstants final {
/* The Handle returned by the factory is located on the roots */ \
/* array, not on the temporary HandleScope, so this is safe. */ \
name##_string_->set_string(isolate->factory()->name##_string()); \
base::HashMap::Entry* entry = \
string_table_.InsertNew(name##_string_, name##_string_->hash()); \
DCHECK(entry->value == nullptr); \
entry->value = reinterpret_cast<void*>(1); \
}
STRING_CONSTANTS(F)
#undef F
}
#define F(name, str) \
AstRawString* name##_string() { return name##_string_; }
const AstRawString* name##_string() const { return name##_string_; }
STRING_CONSTANTS(F)
#undef F
uint32_t hash_seed() const { return hash_seed_; }
const base::CustomMatcherHashMap* string_table() const {
return &string_table_;
}
private:
Zone zone_;
base::CustomMatcherHashMap string_table_;
uint32_t hash_seed_;
#define F(name, str) AstRawString* name##_string_;
@ -380,9 +401,9 @@ class AstStringConstants final {
class AstValueFactory {
public:
AstValueFactory(Zone* zone, AstStringConstants* string_constants,
AstValueFactory(Zone* zone, const AstStringConstants* string_constants,
uint32_t hash_seed)
: string_table_(AstRawStringCompare),
: string_table_(string_constants->string_table()),
values_(nullptr),
strings_(nullptr),
strings_end_(&strings_),
@ -397,7 +418,6 @@ class AstValueFactory {
std::fill(one_character_strings_,
one_character_strings_ + arraysize(one_character_strings_),
nullptr);
InitializeStringConstants();
}
Zone* zone() const { return zone_; }
@ -416,7 +436,7 @@ class AstValueFactory {
const AstConsString* NewConsString(const AstString* left,
const AstString* right);
void Internalize(Isolate* isolate);
V8_EXPORT_PRIVATE void Internalize(Isolate* isolate);
#define F(name, str) \
const AstRawString* name##_string() { \
@ -425,10 +445,11 @@ class AstValueFactory {
STRING_CONSTANTS(F)
#undef F
const AstValue* NewString(const AstRawString* string);
V8_EXPORT_PRIVATE const AstValue* NewString(const AstRawString* string);
// A JavaScript symbol (ECMA-262 edition 6).
const AstValue* NewSymbol(const char* name);
const AstValue* NewNumber(double number, bool with_dot = false);
const AstValue* NewSymbol(AstSymbol symbol);
V8_EXPORT_PRIVATE const AstValue* NewNumber(double number,
bool with_dot = false);
const AstValue* NewSmi(uint32_t number);
const AstValue* NewBoolean(bool b);
const AstValue* NewStringList(ZoneList<const AstRawString*>* strings);
@ -461,19 +482,6 @@ class AstValueFactory {
AstRawString* GetString(uint32_t hash, bool is_one_byte,
Vector<const byte> literal_bytes);
void InitializeStringConstants() {
#define F(name, str) \
AstRawString* raw_string_##name = string_constants_->name##_string(); \
base::HashMap::Entry* entry_##name = string_table_.LookupOrInsert( \
raw_string_##name, raw_string_##name->hash()); \
DCHECK(entry_##name->value == nullptr); \
entry_##name->value = reinterpret_cast<void*>(1);
STRING_CONSTANTS(F)
#undef F
}
static bool AstRawStringCompare(void* a, void* b);
// All strings are copied here, one after another (no NULLs in between).
base::CustomMatcherHashMap string_table_;
// For keeping track of all AstValues and AstRawStrings we've created (so that
@ -486,7 +494,7 @@ class AstValueFactory {
AstString** strings_end_;
// Holds constant string values which are shared across the isolate.
AstStringConstants* string_constants_;
const AstStringConstants* string_constants_;
// Caches for faster access: small numbers, one character lowercase strings
// (for minified code).
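The broader change in this file is that the per-isolate AstStringConstants now builds the hash table of common strings once, and every AstValueFactory seeds its string_table_ by copying that table, instead of re-inserting each constant through the removed InitializeStringConstants(). A rough sketch of the build-once, copy-per-factory idea, with standard containers standing in for base::CustomMatcherHashMap:

#include <string>
#include <unordered_set>

// Built once (per isolate in V8; per process in this sketch)...
const std::unordered_set<std::string>& Constants() {
  static const std::unordered_set<std::string> table = {"prototype",
                                                        "constructor"};
  return table;
}

struct Factory {
  // ...and each factory starts from a copy, so construction cost is a bulk
  // copy rather than one hash insertion per constant.
  std::unordered_set<std::string> string_table = Constants();
};

int main() {
  Factory factory;
  return factory.string_table.count("prototype") ? 0 : 1;
}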

186
deps/v8/src/ast/ast.cc vendored
View File

@ -15,7 +15,10 @@
#include "src/code-stubs.h"
#include "src/contexts.h"
#include "src/conversions.h"
#include "src/double.h"
#include "src/elements.h"
#include "src/objects-inl.h"
#include "src/objects/literal-objects.h"
#include "src/property-details.h"
#include "src/property.h"
#include "src/string-stream.h"
@ -29,6 +32,22 @@ namespace internal {
#ifdef DEBUG
static const char* NameForNativeContextIntrinsicIndex(uint32_t idx) {
switch (idx) {
#define NATIVE_CONTEXT_FIELDS_IDX(NAME, Type, name) \
case Context::NAME: \
return #name;
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELDS_IDX)
#undef NATIVE_CONTEXT_FIELDS_IDX
default:
break;
}
return "UnknownIntrinsicIndex";
}
void AstNode::Print() { Print(Isolate::Current()); }
void AstNode::Print(Isolate* isolate) {
@ -202,47 +221,51 @@ void VariableProxy::BindTo(Variable* var) {
set_var(var);
set_is_resolved();
var->set_is_used();
if (is_assigned()) var->set_maybe_assigned();
}
void VariableProxy::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
void VariableProxy::AssignFeedbackSlots(FeedbackVectorSpec* spec,
TypeofMode typeof_mode,
FeedbackSlotCache* cache) {
if (UsesVariableFeedbackSlot()) {
// VariableProxies that point to the same Variable within a function can
// make their loads from the same IC slot.
if (var()->IsUnallocated() || var()->mode() == DYNAMIC_GLOBAL) {
ZoneHashMap::Entry* entry = cache->Get(var());
if (entry != NULL) {
variable_feedback_slot_ = FeedbackVectorSlot(
static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
FeedbackSlot slot = cache->Get(typeof_mode, var());
if (!slot.IsInvalid()) {
variable_feedback_slot_ = slot;
return;
}
variable_feedback_slot_ = spec->AddLoadGlobalICSlot();
cache->Put(var(), variable_feedback_slot_);
variable_feedback_slot_ = spec->AddLoadGlobalICSlot(typeof_mode);
cache->Put(typeof_mode, var(), variable_feedback_slot_);
} else {
variable_feedback_slot_ = spec->AddLoadICSlot();
}
}
}
static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
FeedbackVectorSlot* out_slot) {
LanguageMode language_mode,
FeedbackSlot* out_slot) {
Property* property = expr->AsProperty();
LhsKind assign_type = Property::GetAssignType(property);
if ((assign_type == VARIABLE &&
expr->AsVariableProxy()->var()->IsUnallocated()) ||
assign_type == NAMED_PROPERTY || assign_type == KEYED_PROPERTY) {
// TODO(ishell): consider using ICSlotCache for variables here.
FeedbackVectorSlotKind kind = assign_type == KEYED_PROPERTY
? FeedbackVectorSlotKind::KEYED_STORE_IC
: FeedbackVectorSlotKind::STORE_IC;
*out_slot = spec->AddSlot(kind);
if (assign_type == KEYED_PROPERTY) {
*out_slot = spec->AddKeyedStoreICSlot(language_mode);
} else {
*out_slot = spec->AddStoreICSlot(language_mode);
}
}
}
void ForInStatement::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(each(), spec, &each_slot_);
void ForInStatement::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
AssignVectorSlots(each(), spec, language_mode, &each_slot_);
for_in_feedback_slot_ = spec->AddGeneralSlot();
}
@ -257,14 +280,16 @@ Assignment::Assignment(Token::Value op, Expression* target, Expression* value,
StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
}
void Assignment::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(target(), spec, &slot_);
void Assignment::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
AssignVectorSlots(target(), spec, language_mode, &slot_);
}
void CountOperation::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
AssignVectorSlots(expression(), spec, &slot_);
void CountOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
AssignVectorSlots(expression(), spec, language_mode, &slot_);
// Assign a slot to collect feedback about binary operations. Used only in
// ignition. Fullcodegen uses AstId to record type feedback.
binary_operation_slot_ = spec->AddInterpreterBinaryOpICSlot();
@ -347,12 +372,12 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
}
}
FeedbackVectorSlot LiteralProperty::GetStoreDataPropertySlot() const {
FeedbackSlot LiteralProperty::GetStoreDataPropertySlot() const {
int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
return GetSlot(offset);
}
void LiteralProperty::SetStoreDataPropertySlot(FeedbackVectorSlot slot) {
void LiteralProperty::SetStoreDataPropertySlot(FeedbackSlot slot) {
int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
return SetSlot(slot, offset);
}
@ -371,23 +396,24 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
kind_(kind),
is_static_(is_static) {}
void ClassLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
void ClassLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
// This logic that computes the number of slots needed for vector store
// ICs must mirror BytecodeGenerator::VisitClassLiteral.
if (FunctionLiteral::NeedsHomeObject(constructor())) {
home_object_slot_ = spec->AddStoreICSlot();
home_object_slot_ = spec->AddStoreICSlot(language_mode);
}
if (NeedsProxySlot()) {
proxy_slot_ = spec->AddStoreICSlot();
proxy_slot_ = spec->AddStoreICSlot(language_mode);
}
for (int i = 0; i < properties()->length(); i++) {
ClassLiteral::Property* property = properties()->at(i);
Expression* value = property->value();
if (FunctionLiteral::NeedsHomeObject(value)) {
property->SetSlot(spec->AddStoreICSlot());
property->SetSlot(spec->AddStoreICSlot(language_mode));
}
property->SetStoreDataPropertySlot(
spec->AddStoreDataPropertyInLiteralICSlot());
@ -407,8 +433,11 @@ void ObjectLiteral::Property::set_emit_store(bool emit_store) {
bool ObjectLiteral::Property::emit_store() const { return emit_store_; }
void ObjectLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
void ObjectLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, cache);
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitObjectLiteral.
int property_index = 0;
@ -430,27 +459,27 @@ void ObjectLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
// contains computed properties with an uninitialized value.
if (key->IsStringLiteral()) {
if (property->emit_store()) {
property->SetSlot(spec->AddStoreICSlot());
property->SetSlot(spec->AddStoreOwnICSlot());
if (FunctionLiteral::NeedsHomeObject(value)) {
property->SetSlot(spec->AddStoreICSlot(), 1);
property->SetSlot(spec->AddStoreICSlot(language_mode), 1);
}
}
break;
}
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
property->SetSlot(spec->AddStoreICSlot());
property->SetSlot(spec->AddStoreICSlot(language_mode));
}
break;
case ObjectLiteral::Property::PROTOTYPE:
break;
case ObjectLiteral::Property::GETTER:
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
property->SetSlot(spec->AddStoreICSlot());
property->SetSlot(spec->AddStoreICSlot(language_mode));
}
break;
case ObjectLiteral::Property::SETTER:
if (property->emit_store() && FunctionLiteral::NeedsHomeObject(value)) {
property->SetSlot(spec->AddStoreICSlot());
property->SetSlot(spec->AddStoreICSlot(language_mode));
}
break;
}
@ -462,7 +491,7 @@ void ObjectLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
Expression* value = property->value();
if (property->kind() != ObjectLiteral::Property::PROTOTYPE) {
if (FunctionLiteral::NeedsHomeObject(value)) {
property->SetSlot(spec->AddStoreICSlot());
property->SetSlot(spec->AddStoreICSlot(language_mode));
}
}
property->SetStoreDataPropertySlot(
@ -582,9 +611,31 @@ void ObjectLiteral::InitDepthAndFlags() {
void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
if (!constant_properties_.is_null()) return;
// Allocate a fixed array to hold all the constant properties.
Handle<FixedArray> constant_properties =
isolate->factory()->NewFixedArray(boilerplate_properties_ * 2, TENURED);
int index_keys = 0;
bool has_seen_proto = false;
for (int i = 0; i < properties()->length(); i++) {
ObjectLiteral::Property* property = properties()->at(i);
if (!IsBoilerplateProperty(property)) {
has_seen_proto = true;
continue;
}
if (property->is_computed_name()) {
continue;
}
Handle<Object> key = property->key()->AsLiteral()->value();
uint32_t element_index = 0;
if (key->ToArrayIndex(&element_index) ||
(key->IsString() && String::cast(*key)->AsArrayIndex(&element_index))) {
index_keys++;
}
}
Handle<BoilerplateDescription> constant_properties =
isolate->factory()->NewBoilerplateDescription(boilerplate_properties_,
properties()->length(),
index_keys, has_seen_proto);
int position = 0;
for (int i = 0; i < properties()->length(); i++) {
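The counting pass above separates named keys from index keys: a property name that parses as an array index (for example "42", but not "x" or "01") lands in the elements backing store, so NewBoilerplateDescription needs that count up front. A rough sketch of the array-index test; the real checks are Object::ToArrayIndex and String::AsArrayIndex as called above, and the bound used below is the spec's maximum index of 2^32 - 2:

#include <cstdint>
#include <cstdio>

// Does this property name parse as an array index? Rejects empty strings,
// non-digits, leading zeros ("01"), and values above 2^32 - 2.
bool IsArrayIndex(const char* key, uint32_t* out) {
  if (*key == '\0' || (key[0] == '0' && key[1] != '\0')) return false;
  uint64_t value = 0;
  for (const char* p = key; *p; p++) {
    if (*p < '0' || *p > '9') return false;
    value = value * 10 + static_cast<uint64_t>(*p - '0');
    if (value > 0xFFFFFFFEull) return false;
  }
  *out = static_cast<uint32_t>(value);
  return true;
}

int main() {
  uint32_t index;
  std::printf("%d %d\n", IsArrayIndex("42", &index),  // 1: element key
              IsArrayIndex("x", &index));             // 0: named key
}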
@ -634,6 +685,10 @@ bool ObjectLiteral::IsFastCloningSupported() const {
kMaximumClonedShallowObjectProperties;
}
ElementsKind ArrayLiteral::constant_elements_kind() const {
return static_cast<ElementsKind>(constant_elements()->elements_kind());
}
void ArrayLiteral::InitDepthAndFlags() {
DCHECK_LT(first_spread_index_, 0);
@ -734,8 +789,16 @@ bool ArrayLiteral::IsFastCloningSupported() const {
ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements;
}
void ArrayLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
void ArrayLiteral::RewindSpreads() {
values_->Rewind(first_spread_index_);
first_spread_index_ = -1;
}
void ArrayLiteral::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
MaterializedLiteral::AssignFeedbackSlots(spec, language_mode, cache);
// This logic that computes the number of slots needed for vector store
// ICs must mirror FullCodeGenerator::VisitArrayLiteral.
for (int array_index = 0; array_index < values()->length(); array_index++) {
@ -745,7 +808,7 @@ void ArrayLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
// We'll reuse the same literal slot for all of the non-constant
// subexpressions that use a keyed store IC.
literal_slot_ = spec->AddKeyedStoreICSlot();
literal_slot_ = spec->AddKeyedStoreICSlot(language_mode);
return;
}
}
@ -803,8 +866,9 @@ void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
set_to_boolean_types(oracle->ToBooleanTypes(right()->test_id()));
}
void BinaryOperation::AssignFeedbackVectorSlots(
FeedbackVectorSpec* spec, FeedbackVectorSlotCache* cache) {
void BinaryOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
// Feedback vector slot is only used by interpreter for binary operations.
// Full-codegen uses AstId to record type feedback.
switch (op()) {
@ -814,7 +878,7 @@ void BinaryOperation::AssignFeedbackVectorSlots(
case Token::OR:
return;
default:
type_feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
feedback_slot_ = spec->AddInterpreterBinaryOpICSlot();
return;
}
}
@ -824,8 +888,9 @@ static bool IsTypeof(Expression* expr) {
return maybe_unary != NULL && maybe_unary->op() == Token::TYPEOF;
}
void CompareOperation::AssignFeedbackVectorSlots(
FeedbackVectorSpec* spec, FeedbackVectorSlotCache* cache_) {
void CompareOperation::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache_) {
// Feedback vector slot is only used by interpreter for compare operations.
// Full-codegen uses AstId to record type feedback.
switch (op()) {
@ -834,7 +899,7 @@ void CompareOperation::AssignFeedbackVectorSlots(
case Token::IN:
return;
default:
type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
feedback_slot_ = spec->AddInterpreterCompareICSlot();
}
}
@ -983,8 +1048,9 @@ bool Expression::IsMonomorphic() const {
}
}
void Call::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
void Call::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
ic_slot_ = spec->AddCallICSlot();
}
@ -1022,9 +1088,10 @@ CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
statements_(statements),
compare_type_(AstType::None()) {}
void CaseClause::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
void CaseClause::AssignFeedbackSlots(FeedbackVectorSpec* spec,
LanguageMode language_mode,
FeedbackSlotCache* cache) {
feedback_slot_ = spec->AddInterpreterCompareICSlot();
}
uint32_t Literal::Hash() {
@ -1042,5 +1109,14 @@ bool Literal::Match(void* literal1, void* literal2) {
(x->IsNumber() && y->IsNumber() && x->AsNumber() == y->AsNumber());
}
const char* CallRuntime::debug_name() {
#ifdef DEBUG
return is_jsruntime() ? NameForNativeContextIntrinsicIndex(context_index_)
: function_->name;
#else
return is_jsruntime() ? "(context function)" : function_->name;
#endif // DEBUG
}
} // namespace internal
} // namespace v8

409
deps/v8/src/ast/ast.h vendored
View File

@ -5,7 +5,6 @@
#ifndef V8_AST_AST_H_
#define V8_AST_AST_H_
#include "src/assembler.h"
#include "src/ast/ast-types.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/modules.h"
@ -15,11 +14,12 @@
#include "src/factory.h"
#include "src/globals.h"
#include "src/isolate.h"
#include "src/label.h"
#include "src/list.h"
#include "src/objects/literal-objects.h"
#include "src/parsing/token.h"
#include "src/runtime/runtime.h"
#include "src/small-pointer-list.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
@ -127,27 +127,29 @@ class TypeFeedbackOracle;
AST_NODE_LIST(DEF_FORWARD_DECLARATION)
#undef DEF_FORWARD_DECLARATION
class FeedbackVectorSlotCache {
class FeedbackSlotCache {
public:
explicit FeedbackVectorSlotCache(Zone* zone)
: zone_(zone),
hash_map_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)) {}
typedef std::pair<TypeofMode, Variable*> Key;
void Put(Variable* variable, FeedbackVectorSlot slot) {
ZoneHashMap::Entry* entry = hash_map_.LookupOrInsert(
variable, ComputePointerHash(variable), ZoneAllocationPolicy(zone_));
entry->value = reinterpret_cast<void*>(slot.ToInt());
explicit FeedbackSlotCache(Zone* zone) : map_(zone) {}
void Put(TypeofMode typeof_mode, Variable* variable, FeedbackSlot slot) {
Key key = std::make_pair(typeof_mode, variable);
auto entry = std::make_pair(key, slot);
map_.insert(entry);
}
ZoneHashMap::Entry* Get(Variable* variable) const {
return hash_map_.Lookup(variable, ComputePointerHash(variable));
FeedbackSlot Get(TypeofMode typeof_mode, Variable* variable) const {
Key key = std::make_pair(typeof_mode, variable);
auto iter = map_.find(key);
if (iter != map_.end()) {
return iter->second;
}
return FeedbackSlot();
}
private:
Zone* zone_;
ZoneHashMap hash_map_;
ZoneMap<Key, FeedbackSlot> map_;
};
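The rewritten FeedbackSlotCache keys entries on the (TypeofMode, Variable*) pair rather than on the variable alone, which is what lets the numbering pass hand out separate slots for a plain load of a global and a load of the same global inside typeof, where a missing binding must yield "undefined" instead of throwing. A compilable analogue with std::map standing in for V8's zone-allocated ZoneMap:

#include <cstdio>
#include <map>
#include <utility>

enum TypeofMode { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
struct Variable {};  // stands in for v8::internal::Variable

struct FeedbackSlot {
  int id = -1;  // -1 plays the role of FeedbackSlot::Invalid()
  bool IsInvalid() const { return id < 0; }
};

class SlotCache {
 public:
  using Key = std::pair<TypeofMode, Variable*>;

  void Put(TypeofMode mode, Variable* var, FeedbackSlot slot) {
    map_.insert({{mode, var}, slot});
  }

  FeedbackSlot Get(TypeofMode mode, Variable* var) const {
    auto it = map_.find({mode, var});
    return it != map_.end() ? it->second : FeedbackSlot{};
  }

 private:
  std::map<Key, FeedbackSlot> map_;
};

int main() {
  SlotCache cache;
  Variable x;
  FeedbackSlot slot;
  slot.id = 0;
  cache.Put(NOT_INSIDE_TYPEOF, &x, slot);
  // The same variable inside typeof misses the cache and needs its own slot.
  std::printf("%d\n", cache.Get(INSIDE_TYPEOF, &x).IsInvalid());  // prints 1
}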
@ -732,10 +734,10 @@ class ForInStatement final : public ForEachStatement {
void set_subject(Expression* e) { subject_ = e; }
// Type feedback information.
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
FeedbackVectorSlot ForInFeedbackSlot() {
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackSlot EachFeedbackSlot() const { return each_slot_; }
FeedbackSlot ForInFeedbackSlot() {
DCHECK(!for_in_feedback_slot_.IsInvalid());
return for_in_feedback_slot_;
}
@ -771,8 +773,8 @@ class ForInStatement final : public ForEachStatement {
Expression* each_;
Expression* subject_;
FeedbackVectorSlot each_slot_;
FeedbackVectorSlot for_in_feedback_slot_;
FeedbackSlot each_slot_;
FeedbackSlot for_in_feedback_slot_;
class ForInTypeField
: public BitField<ForInType, ForEachStatement::kNextBitFieldIndex, 1> {};
@ -962,12 +964,10 @@ class CaseClause final : public Expression {
// CaseClause will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used by
// full codegen and the feedback vector slot is used by interpreter.
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackVectorSlot CompareOperationFeedbackSlot() {
return type_feedback_slot_;
}
FeedbackSlot CompareOperationFeedbackSlot() { return feedback_slot_; }
private:
friend class AstNodeFactory;
@ -980,7 +980,7 @@ class CaseClause final : public Expression {
Label body_target_;
ZoneList<Statement*>* statements_;
AstType* compare_type_;
FeedbackVectorSlot type_feedback_slot_;
FeedbackSlot feedback_slot_;
};
@ -1155,26 +1155,10 @@ class TryFinallyStatement final : public TryStatement {
class DebuggerStatement final : public Statement {
public:
void set_base_id(int id) { base_id_ = id; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId DebugBreakId() const { return BailoutId(local_id(0)); }
private:
friend class AstNodeFactory;
explicit DebuggerStatement(int pos)
: Statement(pos, kDebuggerStatement),
base_id_(BailoutId::None().ToInt()) {}
static int parent_num_ids() { return 0; }
int base_id() const {
DCHECK(!BailoutId(base_id_).IsNone());
return base_id_;
}
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
int base_id_;
explicit DebuggerStatement(int pos) : Statement(pos, kDebuggerStatement) {}
};
@ -1249,32 +1233,32 @@ class Literal final : public Expression {
const AstValue* value_;
};
class AstLiteralReindexer;
// Base class for literals that needs space in the corresponding JSFunction.
// Base class for literals that need space in the type feedback vector.
class MaterializedLiteral : public Expression {
public:
int literal_index() { return literal_index_; }
int depth() const {
// only callable after initialization.
DCHECK(depth_ >= 1);
return depth_;
}
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache) {
literal_slot_ = spec->AddLiteralSlot();
}
FeedbackSlot literal_slot() const { return literal_slot_; }
private:
int depth_ : 31;
int literal_index_;
friend class AstLiteralReindexer;
FeedbackSlot literal_slot_;
class IsSimpleField
: public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
protected:
MaterializedLiteral(int literal_index, int pos, NodeType type)
: Expression(pos, type), depth_(0), literal_index_(literal_index) {
MaterializedLiteral(int pos, NodeType type)
: Expression(pos, type), depth_(0) {
bit_field_ |= IsSimpleField::encode(false);
}
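MaterializedLiteral now reserves its own slot in the feedback vector, replacing the per-function literal_index_ bookkeeping (and the AstLiteralReindexer deleted earlier in this commit). Subclasses chain to the base AssignFeedbackSlots before reserving their own store-IC slots, as ObjectLiteral::AssignFeedbackSlots does in ast.cc above. A compilable sketch of that chaining, with Spec standing in for FeedbackVectorSpec:

#include <cstdio>

struct Spec {
  int next = 0;
  int AddSlot() { return next++; }  // stands in for the Add*Slot() family
};

// The base class reserves the literal's own slot...
struct MaterializedLiteral {
  int literal_slot = -1;
  virtual void AssignFeedbackSlots(Spec* spec) {
    literal_slot = spec->AddSlot();
  }
  virtual ~MaterializedLiteral() = default;
};

// ...and subclasses chain to it before reserving their store slots.
struct ObjectLiteral : MaterializedLiteral {
  int store_slot = -1;
  void AssignFeedbackSlots(Spec* spec) override {
    MaterializedLiteral::AssignFeedbackSlots(spec);
    store_slot = spec->AddSlot();
  }
};

int main() {
  Spec spec;
  ObjectLiteral literal;
  literal.AssignFeedbackSlots(&spec);
  std::printf("%d %d\n", literal.literal_slot, literal.store_slot);  // 0 1
}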
@ -1319,19 +1303,19 @@ class LiteralProperty : public ZoneObject {
bool is_computed_name() const { return is_computed_name_; }
FeedbackVectorSlot GetSlot(int offset = 0) const {
FeedbackSlot GetSlot(int offset = 0) const {
DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
return slots_[offset];
}
FeedbackVectorSlot GetStoreDataPropertySlot() const;
FeedbackSlot GetStoreDataPropertySlot() const;
void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
void SetSlot(FeedbackSlot slot, int offset = 0) {
DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
slots_[offset] = slot;
}
void SetStoreDataPropertySlot(FeedbackVectorSlot slot);
void SetStoreDataPropertySlot(FeedbackSlot slot);
bool NeedsSetFunctionName() const;
@ -1341,7 +1325,7 @@ class LiteralProperty : public ZoneObject {
Expression* key_;
Expression* value_;
FeedbackVectorSlot slots_[2];
FeedbackSlot slots_[2];
bool is_computed_name_;
};
@ -1393,7 +1377,7 @@ class ObjectLiteral final : public MaterializedLiteral {
public:
typedef ObjectLiteralProperty Property;
Handle<FixedArray> constant_properties() const {
Handle<BoilerplateDescription> constant_properties() const {
DCHECK(!constant_properties_.is_null());
return constant_properties_;
}
@ -1407,6 +1391,9 @@ class ObjectLiteral final : public MaterializedLiteral {
bool has_shallow_properties() const {
return depth() == 1 && !has_elements() && !may_store_doubles();
}
bool has_rest_property() const {
return HasRestPropertyField::decode(bit_field_);
}
// Decide if a property should be in the object boilerplate.
static bool IsBoilerplateProperty(Property* property);
@ -1415,7 +1402,8 @@ class ObjectLiteral final : public MaterializedLiteral {
void InitDepthAndFlags();
// Get the constant properties fixed array, populating it if necessary.
Handle<FixedArray> GetOrBuildConstantProperties(Isolate* isolate) {
Handle<BoilerplateDescription> GetOrBuildConstantProperties(
Isolate* isolate) {
if (constant_properties_.is_null()) {
BuildConstantProperties(isolate);
}
@ -1449,7 +1437,8 @@ class ObjectLiteral final : public MaterializedLiteral {
kNoFlags = 0,
kFastElements = 1,
kShallowProperties = 1 << 1,
kDisableMementos = 1 << 2
kDisableMementos = 1 << 2,
kHasRestProperty = 1 << 3,
};
struct Accessors: public ZoneObject {
@ -1470,27 +1459,29 @@ class ObjectLiteral final : public MaterializedLiteral {
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
private:
friend class AstNodeFactory;
ObjectLiteral(ZoneList<Property*>* properties, int literal_index,
uint32_t boilerplate_properties, int pos)
: MaterializedLiteral(literal_index, pos, kObjectLiteral),
ObjectLiteral(ZoneList<Property*>* properties,
uint32_t boilerplate_properties, int pos,
bool has_rest_property)
: MaterializedLiteral(pos, kObjectLiteral),
boilerplate_properties_(boilerplate_properties),
properties_(properties) {
bit_field_ |= FastElementsField::encode(false) |
HasElementsField::encode(false) |
MayStoreDoublesField::encode(false);
MayStoreDoublesField::encode(false) |
HasRestPropertyField::encode(has_rest_property);
}
static int parent_num_ids() { return MaterializedLiteral::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
uint32_t boilerplate_properties_;
Handle<FixedArray> constant_properties_;
Handle<BoilerplateDescription> constant_properties_;
ZoneList<Property*>* properties_;
class FastElementsField
@ -1499,6 +1490,8 @@ class ObjectLiteral final : public MaterializedLiteral {
};
class MayStoreDoublesField
: public BitField<bool, HasElementsField::kNext, 1> {};
class HasRestPropertyField
: public BitField<bool, MayStoreDoublesField::kNext, 1> {};
};
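HasRestPropertyField extends the chain of BitField declarations that pack ObjectLiteral's boolean flags into the single bit_field_ word: each field begins at the previous field's kNext bit, so inserting a field automatically shifts every field declared after it. A self-contained analogue of the encode/decode scheme (simplified; V8's real BitField adds bounds checks):

#include <cstdint>
#include <cstdio>

// Minimal BitField: encode() shifts a value into place, decode() extracts
// it, and kNext lets the following field start where this one ends.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr int kNext = kShift + kSize;
  static constexpr uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> kShift);
  }
};

using FastElementsField = BitField<bool, 0, 1>;
using HasElementsField = BitField<bool, FastElementsField::kNext, 1>;
using HasRestPropertyField = BitField<bool, HasElementsField::kNext, 1>;

int main() {
  uint32_t bits =
      FastElementsField::encode(true) | HasRestPropertyField::encode(true);
  std::printf("%d %d %d\n", FastElementsField::decode(bits),
              HasElementsField::decode(bits),
              HasRestPropertyField::decode(bits));  // prints 1 0 1
}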
@ -1529,14 +1522,14 @@ class AccessorTable
class RegExpLiteral final : public MaterializedLiteral {
public:
Handle<String> pattern() const { return pattern_->string(); }
const AstRawString* raw_pattern() const { return pattern_; }
int flags() const { return flags_; }
private:
friend class AstNodeFactory;
RegExpLiteral(const AstRawString* pattern, int flags, int literal_index,
int pos)
: MaterializedLiteral(literal_index, pos, kRegExpLiteral),
RegExpLiteral(const AstRawString* pattern, int flags, int pos)
: MaterializedLiteral(pos, kRegExpLiteral),
flags_(flags),
pattern_(pattern) {
set_depth(1);
@ -1554,9 +1547,7 @@ class ArrayLiteral final : public MaterializedLiteral {
Handle<ConstantElementsPair> constant_elements() const {
return constant_elements_;
}
ElementsKind constant_elements_kind() const {
return static_cast<ElementsKind>(constant_elements()->elements_kind());
}
ElementsKind constant_elements_kind() const;
ZoneList<Expression*>* values() const { return values_; }
@ -1603,10 +1594,7 @@ class ArrayLiteral final : public MaterializedLiteral {
ZoneList<Expression*>::iterator EndValue() const { return values_->end(); }
// Rewind an array literal omitting everything from the first spread on.
void RewindSpreads() {
values_->Rewind(first_spread_index_);
first_spread_index_ = -1;
}
void RewindSpreads();
enum Flags {
kNoFlags = 0,
@ -1614,16 +1602,15 @@ class ArrayLiteral final : public MaterializedLiteral {
kDisableMementos = 1 << 1
};
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot LiteralFeedbackSlot() const { return literal_slot_; }
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackSlot LiteralFeedbackSlot() const { return literal_slot_; }
private:
friend class AstNodeFactory;
ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index,
int literal_index, int pos)
: MaterializedLiteral(literal_index, pos, kArrayLiteral),
ArrayLiteral(ZoneList<Expression*>* values, int first_spread_index, int pos)
: MaterializedLiteral(pos, kArrayLiteral),
first_spread_index_(first_spread_index),
values_(values) {}
@ -1631,7 +1618,7 @@ class ArrayLiteral final : public MaterializedLiteral {
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
int first_spread_index_;
FeedbackVectorSlot literal_slot_;
FeedbackSlot literal_slot_;
Handle<ConstantElementsPair> constant_elements_;
ZoneList<Expression*>* values_;
};
@ -1693,10 +1680,10 @@ class VariableProxy final : public Expression {
return var()->IsUnallocated() || var()->IsLookupSlot();
}
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
void AssignFeedbackSlots(FeedbackVectorSpec* spec, TypeofMode typeof_mode,
FeedbackSlotCache* cache);
FeedbackVectorSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
FeedbackSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
static int num_ids() { return parent_num_ids() + 1; }
BailoutId BeforeId() const { return BailoutId(local_id(0)); }
@ -1722,7 +1709,7 @@ class VariableProxy final : public Expression {
class HoleCheckModeField
: public BitField<HoleCheckMode, IsNewTargetField::kNext, 1> {};
FeedbackVectorSlot variable_feedback_slot_;
FeedbackSlot variable_feedback_slot_;
union {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
@ -1789,17 +1776,16 @@ class Property final : public Expression {
bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
FeedbackVectorSlotKind kind = key()->IsPropertyName()
? FeedbackVectorSlotKind::LOAD_IC
: FeedbackVectorSlotKind::KEYED_LOAD_IC;
property_feedback_slot_ = spec->AddSlot(kind);
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache) {
if (key()->IsPropertyName()) {
property_feedback_slot_ = spec->AddLoadICSlot();
} else {
property_feedback_slot_ = spec->AddKeyedLoadICSlot();
}
}
FeedbackVectorSlot PropertyFeedbackSlot() const {
return property_feedback_slot_;
}
FeedbackSlot PropertyFeedbackSlot() const { return property_feedback_slot_; }
// Returns the properties assign type.
static LhsKind GetAssignType(Property* property) {
@ -1832,7 +1818,7 @@ class Property final : public Expression {
class InlineCacheStateField
: public BitField<InlineCacheState, KeyTypeField::kNext, 4> {};
FeedbackVectorSlot property_feedback_slot_;
FeedbackSlot property_feedback_slot_;
Expression* obj_;
Expression* key_;
SmallMapList receiver_types_;
@ -1847,10 +1833,10 @@ class Call final : public Expression {
void set_expression(Expression* e) { expression_ = e; }
// Type feedback information.
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
FeedbackSlot CallFeedbackICSlot() const { return ic_slot_; }
SmallMapList* GetReceiverTypes() {
if (expression()->IsProperty()) {
@ -1900,6 +1886,10 @@ class Call final : public Expression {
}
void MarkTail() { bit_field_ = IsTailField::update(bit_field_, true); }
bool only_last_arg_is_spread() {
return !arguments_->is_empty() && arguments_->last()->IsSpread();
}
enum CallType {
GLOBAL_CALL,
WITH_CALL,
@ -1949,7 +1939,7 @@ class Call final : public Expression {
class IsTailField : public BitField<bool, IsUninitializedField::kNext, 1> {};
class IsPossiblyEvalField : public BitField<bool, IsTailField::kNext, 1> {};
FeedbackVectorSlot ic_slot_;
FeedbackSlot ic_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
@ -1965,14 +1955,14 @@ class CallNew final : public Expression {
void set_expression(Expression* e) { expression_ = e; }
// Type feedback information.
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache) {
// CallNew stores feedback in the exact same way as Call. We can
// piggyback on the type feedback infrastructure for calls.
callnew_feedback_slot_ = spec->AddCallICSlot();
}
FeedbackVectorSlot CallNewFeedbackSlot() {
FeedbackSlot CallNewFeedbackSlot() {
DCHECK(!callnew_feedback_slot_.IsInvalid());
return callnew_feedback_slot_;
}
@ -1999,6 +1989,10 @@ class CallNew final : public Expression {
set_is_monomorphic(true);
}
bool only_last_arg_is_spread() {
return !arguments_->is_empty() && arguments_->last()->IsSpread();
}
private:
friend class AstNodeFactory;
@ -2012,7 +2006,7 @@ class CallNew final : public Expression {
static int parent_num_ids() { return Expression::num_ids(); }
int local_id(int n) const { return base_id() + parent_num_ids() + n; }
FeedbackVectorSlot callnew_feedback_slot_;
FeedbackSlot callnew_feedback_slot_;
Expression* expression_;
ZoneList<Expression*>* arguments_;
Handle<JSFunction> target_;
@ -2047,10 +2041,7 @@ class CallRuntime final : public Expression {
static int num_ids() { return parent_num_ids() + 1; }
BailoutId CallId() { return BailoutId(local_id(0)); }
const char* debug_name() {
return is_jsruntime() ? "(context function)" : function_->name;
}
const char* debug_name();
private:
friend class AstNodeFactory;
@ -2139,12 +2130,10 @@ class BinaryOperation final : public Expression {
// BinaryOperation will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used
// by full codegen and the feedback vector slot is used by interpreter.
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackVectorSlot BinaryOperationFeedbackSlot() const {
return type_feedback_slot_;
}
FeedbackSlot BinaryOperationFeedbackSlot() const { return feedback_slot_; }
TypeFeedbackId BinaryOperationFeedbackId() const {
return TypeFeedbackId(local_id(1));
@ -2182,7 +2171,7 @@ class BinaryOperation final : public Expression {
Expression* left_;
Expression* right_;
Handle<AllocationSite> allocation_site_;
FeedbackVectorSlot type_feedback_slot_;
FeedbackSlot feedback_slot_;
class OperatorField
: public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
@ -2228,13 +2217,13 @@ class CountOperation final : public Expression {
}
// Feedback slot for binary operation is only used by ignition.
FeedbackVectorSlot CountBinaryOpFeedbackSlot() const {
FeedbackSlot CountBinaryOpFeedbackSlot() const {
return binary_operation_slot_;
}
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot CountSlot() const { return slot_; }
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackSlot CountSlot() const { return slot_; }
private:
friend class AstNodeFactory;
@ -2256,8 +2245,8 @@ class CountOperation final : public Expression {
: public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
FeedbackVectorSlot slot_;
FeedbackVectorSlot binary_operation_slot_;
FeedbackSlot slot_;
FeedbackSlot binary_operation_slot_;
AstType* type_;
Expression* expression_;
SmallMapList receiver_types_;
@ -2284,12 +2273,10 @@ class CompareOperation final : public Expression {
// CompareOperation will have both a slot in the feedback vector and the
// TypeFeedbackId to record the type information. TypeFeedbackId is used
// by full codegen and the feedback vector slot is used by interpreter.
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackVectorSlot CompareOperationFeedbackSlot() const {
return type_feedback_slot_;
}
FeedbackSlot CompareOperationFeedbackSlot() const { return feedback_slot_; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@ -2316,7 +2303,7 @@ class CompareOperation final : public Expression {
Expression* right_;
AstType* combined_type_;
FeedbackVectorSlot type_feedback_slot_;
FeedbackSlot feedback_slot_;
class OperatorField
: public BitField<Token::Value, Expression::kNextBitFieldIndex, 7> {};
};
@ -2430,9 +2417,9 @@ class Assignment final : public Expression {
bit_field_ = StoreModeField::update(bit_field_, mode);
}
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
FeedbackVectorSlot AssignmentSlot() const { return slot_; }
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
FeedbackSlot AssignmentSlot() const { return slot_; }
private:
friend class AstNodeFactory;
@ -2450,7 +2437,7 @@ class Assignment final : public Expression {
: public BitField<KeyedAccessStoreMode, KeyTypeField::kNext, 3> {};
class TokenField : public BitField<Token::Value, StoreModeField::kNext, 7> {};
FeedbackVectorSlot slot_;
FeedbackSlot slot_;
Expression* target_;
Expression* value_;
BinaryOperation* binary_operation_;
@ -2597,21 +2584,15 @@ class FunctionLiteral final : public Expression {
}
LanguageMode language_mode() const;
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// The + 1 is because we need an array with room for the literals
// as well as the feedback vector.
literal_feedback_slot_ =
spec->AddCreateClosureSlot(materialized_literal_count_ + 1);
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache) {
literal_feedback_slot_ = spec->AddCreateClosureSlot();
}
FeedbackVectorSlot LiteralFeedbackSlot() const {
return literal_feedback_slot_;
}
FeedbackSlot LiteralFeedbackSlot() const { return literal_feedback_slot_; }
static bool NeedsHomeObject(Expression* expr);
int materialized_literal_count() { return materialized_literal_count_; }
int expected_property_count() { return expected_property_count_; }
int parameter_count() { return parameter_count_; }
int function_length() { return function_length_; }
@ -2718,14 +2699,13 @@ class FunctionLiteral final : public Expression {
FunctionLiteral(Zone* zone, const AstString* name,
AstValueFactory* ast_value_factory, DeclarationScope* scope,
ZoneList<Statement*>* body, int materialized_literal_count,
int expected_property_count, int parameter_count,
int function_length, FunctionType function_type,
ZoneList<Statement*>* body, int expected_property_count,
int parameter_count, int function_length,
FunctionType function_type,
ParameterFlag has_duplicate_parameters,
EagerCompileHint eager_compile_hint, int position,
bool has_braces, int function_literal_id)
: Expression(position, kFunctionLiteral),
materialized_literal_count_(materialized_literal_count),
expected_property_count_(expected_property_count),
parameter_count_(parameter_count),
function_length_(function_length),
@ -2757,7 +2737,6 @@ class FunctionLiteral final : public Expression {
: public BitField<BailoutReason, ShouldNotBeUsedOnceHintField::kNext, 8> {
};
int materialized_literal_count_;
int expected_property_count_;
int parameter_count_;
int function_length_;
@ -2772,7 +2751,7 @@ class FunctionLiteral final : public Expression {
Handle<String> inferred_name_;
AstProperties ast_properties_;
int function_literal_id_;
FeedbackVectorSlot literal_feedback_slot_;
FeedbackSlot literal_feedback_slot_;
};
// Property is used for passing information
@ -2816,16 +2795,16 @@ class ClassLiteral final : public Expression {
// Object literals need one feedback slot for each non-trivial value, as well
// as some slots for home objects.
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache);
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache);
bool NeedsProxySlot() const {
return class_variable_proxy() != nullptr &&
class_variable_proxy()->var()->IsUnallocated();
}
FeedbackVectorSlot HomeObjectSlot() const { return home_object_slot_; }
FeedbackVectorSlot ProxySlot() const { return proxy_slot_; }
FeedbackSlot HomeObjectSlot() const { return home_object_slot_; }
FeedbackSlot ProxySlot() const { return proxy_slot_; }
private:
friend class AstNodeFactory;
@ -2845,8 +2824,8 @@ class ClassLiteral final : public Expression {
}
int end_position_;
FeedbackVectorSlot home_object_slot_;
FeedbackVectorSlot proxy_slot_;
FeedbackSlot home_object_slot_;
FeedbackSlot proxy_slot_;
VariableProxy* class_variable_proxy_;
Expression* extends_;
FunctionLiteral* constructor_;
@ -2863,18 +2842,13 @@ class NativeFunctionLiteral final : public Expression {
public:
Handle<String> name() const { return name_->string(); }
v8::Extension* extension() const { return extension_; }
FeedbackVectorSlot LiteralFeedbackSlot() const {
return literal_feedback_slot_;
}
FeedbackSlot LiteralFeedbackSlot() const { return literal_feedback_slot_; }
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
// 0 is a magic number here. It means we are holding the literals
// array for a native function literal, which needs to be
// the empty literals array.
// TODO(mvstanton): The FeedbackVectorSlotCache can be adapted
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache) {
// TODO(mvstanton): The FeedbackSlotCache can be adapted
// to always return the same slot for this case.
literal_feedback_slot_ = spec->AddCreateClosureSlot(0);
literal_feedback_slot_ = spec->AddCreateClosureSlot();
}
private:
@ -2888,7 +2862,7 @@ class NativeFunctionLiteral final : public Expression {
const AstRawString* name_;
v8::Extension* extension_;
FeedbackVectorSlot literal_feedback_slot_;
FeedbackSlot literal_feedback_slot_;
};
@ -2965,38 +2939,54 @@ class EmptyParentheses final : public Expression {
// (defined at https://tc39.github.io/ecma262/#sec-getiterator). Ignition
// desugars this into a LoadIC / JSLoadNamed, CallIC, and a type-check to
// validate the return value of the Symbol.iterator() call.
enum class IteratorType { kNormal, kAsync };
class GetIterator final : public Expression {
public:
IteratorType hint() const { return hint_; }
Expression* iterable() const { return iterable_; }
void set_iterable(Expression* iterable) { iterable_ = iterable; }
static int num_ids() { return parent_num_ids(); }
void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
FeedbackVectorSlotCache* cache) {
iterator_property_feedback_slot_ =
spec->AddSlot(FeedbackVectorSlotKind::LOAD_IC);
iterator_call_feedback_slot_ =
spec->AddSlot(FeedbackVectorSlotKind::CALL_IC);
void AssignFeedbackSlots(FeedbackVectorSpec* spec, LanguageMode language_mode,
FeedbackSlotCache* cache) {
iterator_property_feedback_slot_ = spec->AddLoadICSlot();
iterator_call_feedback_slot_ = spec->AddCallICSlot();
if (hint() == IteratorType::kAsync) {
async_iterator_property_feedback_slot_ = spec->AddLoadICSlot();
async_iterator_call_feedback_slot_ = spec->AddCallICSlot();
}
}
FeedbackVectorSlot IteratorPropertyFeedbackSlot() const {
FeedbackSlot IteratorPropertyFeedbackSlot() const {
return iterator_property_feedback_slot_;
}
FeedbackVectorSlot IteratorCallFeedbackSlot() const {
FeedbackSlot IteratorCallFeedbackSlot() const {
return iterator_call_feedback_slot_;
}
FeedbackSlot AsyncIteratorPropertyFeedbackSlot() const {
return async_iterator_property_feedback_slot_;
}
FeedbackSlot AsyncIteratorCallFeedbackSlot() const {
return async_iterator_call_feedback_slot_;
}
private:
friend class AstNodeFactory;
explicit GetIterator(Expression* iterable, int pos)
: Expression(pos, kGetIterator), iterable_(iterable) {}
explicit GetIterator(Expression* iterable, IteratorType hint, int pos)
: Expression(pos, kGetIterator), hint_(hint), iterable_(iterable) {}
IteratorType hint_;
Expression* iterable_;
FeedbackVectorSlot iterator_property_feedback_slot_;
FeedbackVectorSlot iterator_call_feedback_slot_;
FeedbackSlot iterator_property_feedback_slot_;
FeedbackSlot iterator_call_feedback_slot_;
FeedbackSlot async_iterator_property_feedback_slot_;
FeedbackSlot async_iterator_call_feedback_slot_;
};
// ----------------------------------------------------------------------------
@ -3212,6 +3202,11 @@ class AstNodeFactory final BASE_EMBEDDED {
return NULL;
}
ForOfStatement* NewForOfStatement(ZoneList<const AstRawString*>* labels,
int pos) {
return new (zone_) ForOfStatement(labels, pos);
}
ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
return new (zone_) ExpressionStatement(expression, pos);
}
@ -3312,8 +3307,8 @@ class AstNodeFactory final BASE_EMBEDDED {
}
// A JavaScript symbol (ECMA-262 edition 6).
Literal* NewSymbolLiteral(const char* name, int pos) {
return new (zone_) Literal(ast_value_factory_->NewSymbol(name), pos);
Literal* NewSymbolLiteral(AstSymbol symbol, int pos) {
return new (zone_) Literal(ast_value_factory_->NewSymbol(symbol), pos);
}
Literal* NewNumberLiteral(double number, int pos, bool with_dot = false) {
@ -3342,10 +3337,10 @@ class AstNodeFactory final BASE_EMBEDDED {
}
ObjectLiteral* NewObjectLiteral(
ZoneList<ObjectLiteral::Property*>* properties, int literal_index,
uint32_t boilerplate_properties, int pos) {
return new (zone_)
ObjectLiteral(properties, literal_index, boilerplate_properties, pos);
ZoneList<ObjectLiteral::Property*>* properties,
uint32_t boilerplate_properties, int pos, bool has_rest_property) {
return new (zone_) ObjectLiteral(properties, boilerplate_properties, pos,
has_rest_property);
}
ObjectLiteral::Property* NewObjectLiteralProperty(
@ -3363,21 +3358,18 @@ class AstNodeFactory final BASE_EMBEDDED {
}
RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags,
int literal_index, int pos) {
return new (zone_) RegExpLiteral(pattern, flags, literal_index, pos);
int pos) {
return new (zone_) RegExpLiteral(pattern, flags, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int literal_index,
int pos) {
return new (zone_) ArrayLiteral(values, -1, literal_index, pos);
return new (zone_) ArrayLiteral(values, -1, pos);
}
ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
int first_spread_index, int literal_index,
int pos) {
return new (zone_)
ArrayLiteral(values, first_spread_index, literal_index, pos);
int first_spread_index, int pos) {
return new (zone_) ArrayLiteral(values, first_spread_index, pos);
}
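
Every factory method here allocates with `new (zone_) ...`: AST nodes live in an arena-style Zone and are released wholesale when parsing finishes, never deleted individually. A toy sketch of that placement-new arena pattern (ToyZone and ToyNode are illustrative names, not V8 classes; the real Zone uses segmented buffers rather than one malloc per allocation):

#include <cstddef>
#include <cstdlib>
#include <vector>

// Toy arena: hand out raw memory, free everything at once on destruction.
// Note that object destructors are never run -- arena types must not own
// resources outside the arena.
class ToyZone {
 public:
  ~ToyZone() {
    for (void* p : blocks_) free(p);
  }
  void* New(size_t size) {
    void* p = malloc(size);
    blocks_.push_back(p);
    return p;
  }

 private:
  std::vector<void*> blocks_;
};

// Arena-allocated objects expose a placement operator new taking the zone.
struct ToyNode {
  explicit ToyNode(int pos) : position(pos) {}
  void* operator new(size_t size, ToyZone* zone) { return zone->New(size); }
  int position;
};

int main() {
  ToyZone zone;
  ToyNode* node = new (&zone) ToyNode(42);  // no matching delete: zone owns it
  return node->position == 42 ? 0 : 1;
}  // all nodes vanish together when |zone| goes out of scope
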
VariableProxy* NewVariableProxy(Variable* var,
@ -3501,30 +3493,30 @@ class AstNodeFactory final BASE_EMBEDDED {
FunctionLiteral* NewFunctionLiteral(
const AstRawString* name, DeclarationScope* scope,
ZoneList<Statement*>* body, int materialized_literal_count,
int expected_property_count, int parameter_count, int function_length,
ZoneList<Statement*>* body, int expected_property_count,
int parameter_count, int function_length,
FunctionLiteral::ParameterFlag has_duplicate_parameters,
FunctionLiteral::FunctionType function_type,
FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
bool has_braces, int function_literal_id) {
return new (zone_) FunctionLiteral(
zone_, name, ast_value_factory_, scope, body,
materialized_literal_count, expected_property_count, parameter_count,
function_length, function_type, has_duplicate_parameters,
eager_compile_hint, position, has_braces, function_literal_id);
zone_, name, ast_value_factory_, scope, body, expected_property_count,
parameter_count, function_length, function_type,
has_duplicate_parameters, eager_compile_hint, position, has_braces,
function_literal_id);
}
// Creates a FunctionLiteral representing a top-level script, the
// result of an eval (top-level or otherwise), or the result of calling
// the Function constructor.
FunctionLiteral* NewScriptOrEvalFunctionLiteral(
DeclarationScope* scope, ZoneList<Statement*>* body,
int materialized_literal_count, int expected_property_count,
FunctionLiteral* NewScriptOrEvalFunctionLiteral(DeclarationScope* scope,
ZoneList<Statement*>* body,
int expected_property_count,
int parameter_count) {
return new (zone_) FunctionLiteral(
zone_, ast_value_factory_->empty_string(), ast_value_factory_, scope,
body, materialized_literal_count, expected_property_count,
parameter_count, parameter_count, FunctionLiteral::kAnonymousExpression,
body, expected_property_count, parameter_count, parameter_count,
FunctionLiteral::kAnonymousExpression,
FunctionLiteral::kNoDuplicateParameters,
FunctionLiteral::kShouldLazyCompile, 0, true,
FunctionLiteral::kIdTypeTopLevel);
@ -3581,8 +3573,9 @@ class AstNodeFactory final BASE_EMBEDDED {
return new (zone_) EmptyParentheses(pos);
}
GetIterator* NewGetIterator(Expression* iterable, int pos) {
return new (zone_) GetIterator(iterable, pos);
GetIterator* NewGetIterator(Expression* iterable, IteratorType hint,
int pos) {
return new (zone_) GetIterator(iterable, hint, pos);
}
Zone* zone() const { return zone_; }

View File

@ -7,6 +7,7 @@
#include "src/ast/scopes.h"
#include "src/objects-inl.h"
#include "src/objects/module-info.h"
#include "src/pending-compilation-error-handler.h"
namespace v8 {
namespace internal {

View File

@ -6,7 +6,6 @@
#define V8_AST_MODULES_H_
#include "src/parsing/scanner.h" // Only for Scanner::Location.
#include "src/pending-compilation-error-handler.h"
#include "src/zone/zone-containers.h"
namespace v8 {
@ -16,6 +15,7 @@ namespace internal {
class AstRawString;
class ModuleInfo;
class ModuleInfoEntry;
class PendingCompilationErrorHandler;
class ModuleDescriptor : public ZoneObject {
public:

View File

@ -440,9 +440,9 @@ void CallPrinter::PrintLiteral(const AstRawString* value, bool quote) {
#ifdef DEBUG
// A helper for ast nodes that use FeedbackVectorSlots.
// A helper for ast nodes that use FeedbackSlots.
static int FormatSlotNode(Vector<char>* buf, Expression* node,
const char* node_name, FeedbackVectorSlot slot) {
const char* node_name, FeedbackSlot slot) {
int pos = SNPrintF(*buf, "%s", node_name);
if (!slot.IsInvalid()) {
pos += SNPrintF(*buf + pos, " Slot(%d)", slot.ToInt());
@ -978,7 +978,7 @@ void AstPrinter::VisitLiteral(Literal* node) {
void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
IndentedScope indent(this, "REGEXP LITERAL", node->position());
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
PrintIndented(buf.start());
PrintLiteralIndented("PATTERN", node->pattern(), false);
int i = 0;
@ -997,7 +997,7 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
void AstPrinter::VisitObjectLiteral(ObjectLiteral* node) {
IndentedScope indent(this, "OBJ LITERAL", node->position());
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
PrintIndented(buf.start());
PrintObjectProperties(node->properties());
}
@ -1043,7 +1043,7 @@ void AstPrinter::VisitArrayLiteral(ArrayLiteral* node) {
IndentedScope indent(this, "ARRAY LITERAL", node->position());
EmbeddedVector<char, 128> buf;
SNPrintF(buf, "literal_index = %d\n", node->literal_index());
SNPrintF(buf, "literal_slot = %d\n", node->literal_slot().ToInt());
PrintIndented(buf.start());
if (node->values()->length() > 0) {
IndentedScope indent(this, "VALUES", node->position());

View File

@ -13,7 +13,9 @@
#include "src/messages.h"
#include "src/objects-inl.h"
#include "src/objects/module-info.h"
#include "src/objects/scope-info.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/preparsed-scope-data.h"
namespace v8 {
namespace internal {
@ -64,7 +66,7 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
return reinterpret_cast<Variable*>(p->value);
}
void VariableMap::DeclareName(Zone* zone, const AstRawString* name,
Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name,
VariableMode mode) {
Entry* p =
ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
@ -75,6 +77,7 @@ void VariableMap::DeclareName(Zone* zone, const AstRawString* name,
p->value =
mode == VAR ? kDummyPreParserVariable : kDummyPreParserLexicalVariable;
}
return reinterpret_cast<Variable*>(p->value);
}
void VariableMap::Remove(Variable* var) {
@ -506,7 +509,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
}
}
bool var_created = false;
Variable* created_variable = nullptr;
// Write in assignments to var for each block-scoped function declaration
auto delegates = static_cast<SloppyBlockFunctionMap::Delegate*>(p->value);
@ -541,9 +544,9 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
if (!should_hoist) continue;
// Declare a var-style binding for the function in the outer scope
if (!var_created) {
var_created = true;
if (factory) {
DCHECK(!is_being_lazily_parsed_);
if (created_variable == nullptr) {
VariableProxy* proxy =
factory->NewVariableProxy(name, NORMAL_VARIABLE);
auto declaration =
@ -552,22 +555,28 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
// allow_harmony_restrictive_generators and
// sloppy_mode_block_scope_function_redefinition.
bool ok = true;
DeclareVariable(declaration, VAR,
Variable::DefaultInitializationFlag(VAR), false,
created_variable = DeclareVariable(
declaration, VAR, Variable::DefaultInitializationFlag(VAR), false,
nullptr, &ok);
CHECK(ok); // Based on the preceding check, this should not fail
} else {
DeclareVariableName(name, VAR);
}
}
if (factory) {
Expression* assignment = factory->NewAssignment(
Token::ASSIGN, NewUnresolved(factory, name),
delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
Statement* statement =
factory->NewExpressionStatement(assignment, kNoSourcePosition);
delegate->set_statement(statement);
} else {
DCHECK(is_being_lazily_parsed_);
if (created_variable == nullptr) {
created_variable = DeclareVariableName(name, VAR);
if (created_variable != kDummyPreParserVariable &&
created_variable != kDummyPreParserLexicalVariable) {
DCHECK(FLAG_preparser_scope_analysis);
created_variable->set_maybe_assigned();
}
}
}
}
}
@ -625,6 +634,7 @@ void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
#ifdef DEBUG
if (info->script_is_native() ? FLAG_print_builtin_scopes
: FLAG_print_scopes) {
PrintF("Global scope:\n");
scope->Print();
}
scope->CheckScopePositions();
@ -655,7 +665,7 @@ void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
// Note that it might never be accessed, in which case it won't be
// allocated during variable allocation.
arguments_ = Declare(zone(), ast_value_factory->arguments_string(), VAR);
} else if (IsLexicalVariableMode(arguments_->mode())) {
} else if (IsLexical(arguments_)) {
// Check if there's a lexically declared variable named arguments to avoid
// redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
arguments_ = nullptr;
@ -698,7 +708,8 @@ Variable* DeclarationScope::DeclareGeneratorObjectVar(
DCHECK(is_function_scope() || is_module_scope());
DCHECK_NULL(generator_object_var());
Variable* result = EnsureRareData()->generator_object = NewTemporary(name);
Variable* result = EnsureRareData()->generator_object =
NewTemporary(name, kNotAssigned);
result->set_is_used();
return result;
}
@ -946,6 +957,7 @@ Variable* DeclarationScope::DeclareParameter(
if (mode == TEMPORARY) {
var = NewTemporary(name);
} else {
DCHECK_EQ(mode, VAR);
var = Declare(zone(), name, mode);
// TODO(wingo): Avoid O(n^2) check.
*is_duplicate = IsDeclaredParameter(name);
@ -958,6 +970,26 @@ Variable* DeclarationScope::DeclareParameter(
return var;
}
Variable* DeclarationScope::DeclareParameterName(
const AstRawString* name, bool is_rest,
AstValueFactory* ast_value_factory) {
DCHECK(!already_resolved_);
DCHECK(is_function_scope() || is_module_scope());
DCHECK(!has_rest_ || is_rest);
DCHECK(is_being_lazily_parsed_);
has_rest_ = is_rest;
if (name == ast_value_factory->arguments_string()) {
has_arguments_parameter_ = true;
}
if (FLAG_preparser_scope_analysis) {
Variable* var = Declare(zone(), name, VAR);
params_.Add(var, zone());
return var;
}
DeclareVariableName(name, VAR);
return nullptr;
}
Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
InitializationFlag init_flag, VariableKind kind,
MaybeAssignedFlag maybe_assigned_flag) {
@ -966,7 +998,8 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
// introduced during variable allocation, and TEMPORARY variables are
// allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
DCHECK_IMPLIES(GetDeclarationScope()->is_being_lazily_parsed(),
mode == VAR || mode == LET || mode == CONST);
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
return Declare(zone(), name, mode, kind, init_flag, maybe_assigned_flag);
}
@ -995,15 +1028,25 @@ Variable* Scope::DeclareVariable(
const AstRawString* name = proxy->raw_name();
bool is_function_declaration = declaration->IsFunctionDeclaration();
// Pessimistically assume that top-level variables will be assigned.
//
// Top-level variables in a script can be accessed by other scripts or even
// become global properties. While this does not apply to top-level variables
// in a module (assuming they are not exported), we must still mark these as
// assigned because they might be accessed by a lazily parsed top-level
// function, which, for efficiency, we preparse without variable tracking.
if (is_script_scope() || is_module_scope()) {
if (mode != CONST) proxy->set_is_assigned();
}
Variable* var = nullptr;
if (is_eval_scope() && is_sloppy(language_mode()) && mode == VAR) {
// In a var binding in a sloppy direct eval, pollute the enclosing scope
// with this new binding by doing the following:
// The proxy is bound to a lookup variable to force a dynamic declaration
// using the DeclareEvalVar or DeclareEvalFunction runtime functions.
VariableKind kind = NORMAL_VARIABLE;
// TODO(sigurds) figure out if kNotAssigned is OK here
var = new (zone()) Variable(this, name, mode, kind, init, kNotAssigned);
var = new (zone())
Variable(this, name, mode, NORMAL_VARIABLE, init, kMaybeAssigned);
var->AllocateTo(VariableLocation::LOOKUP, -1);
} else {
// Declare the variable in the declaration scope.
@ -1077,7 +1120,8 @@ Variable* Scope::DeclareVariable(
return var;
}
void Scope::DeclareVariableName(const AstRawString* name, VariableMode mode) {
Variable* Scope::DeclareVariableName(const AstRawString* name,
VariableMode mode) {
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_);
DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
@ -1096,7 +1140,21 @@ void Scope::DeclareVariableName(const AstRawString* name, VariableMode mode) {
DCHECK(scope_info_.is_null());
// Declare the variable in the declaration scope.
variables_.DeclareName(zone(), name, mode);
if (FLAG_preparser_scope_analysis) {
Variable* var = LookupLocal(name);
DCHECK_NE(var, kDummyPreParserLexicalVariable);
DCHECK_NE(var, kDummyPreParserVariable);
if (var == nullptr) {
var = DeclareLocal(name, mode);
} else if (mode == VAR) {
DCHECK_EQ(var->mode(), VAR);
var->set_maybe_assigned();
}
var->set_is_used();
return var;
} else {
return variables_.DeclareName(zone(), name, mode);
}
}
VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
@ -1124,6 +1182,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
VariableKind kind) {
DCHECK(is_script_scope());
return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind);
// TODO(neis): Mark variable as maybe-assigned?
}
@ -1147,10 +1206,16 @@ bool Scope::RemoveUnresolved(VariableProxy* var) {
}
Variable* Scope::NewTemporary(const AstRawString* name) {
return NewTemporary(name, kMaybeAssigned);
}
Variable* Scope::NewTemporary(const AstRawString* name,
MaybeAssignedFlag maybe_assigned) {
DeclarationScope* scope = GetClosureScope();
Variable* var = new (zone())
Variable(scope, name, TEMPORARY, NORMAL_VARIABLE, kCreatedInitialized);
scope->AddLocal(var);
if (maybe_assigned == kMaybeAssigned) var->set_maybe_assigned();
return var;
}
@ -1365,7 +1430,11 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
DCHECK(is_function_scope());
// Reset all non-trivial members.
if (!aborted || !IsArrowFunction(function_kind_)) {
// Do not remove parameters when lazy parsing an Arrow Function has failed,
// as the formal parameters are not re-parsed.
params_.Clear();
}
decls_.Clear();
locals_.Clear();
inner_scope_ = nullptr;
@ -1394,7 +1463,9 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
was_lazily_parsed_ = !aborted;
}
void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
void DeclarationScope::AnalyzePartially(
AstNodeFactory* ast_node_factory,
PreParsedScopeData* preparsed_scope_data) {
DCHECK(!force_eager_compilation_);
VariableProxy* unresolved = nullptr;
@ -1415,7 +1486,20 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
!(MustAllocate(arguments_) && !has_arguments_parameter_)) {
arguments_ = nullptr;
}
if (FLAG_preparser_scope_analysis) {
// Decide context allocation for the locals and parameters and store the
// info away.
AllocateVariablesRecursively();
CollectVariableData(preparsed_scope_data);
}
}
#ifdef DEBUG
if (FLAG_print_scopes) {
PrintF("Inner function scope:\n");
Print();
}
#endif
ResetAfterPreparsing(ast_node_factory->ast_value_factory(), false);
@ -1501,6 +1585,10 @@ void PrintMap(int indent, const char* label, VariableMap* map, bool locals,
for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var == function_var) continue;
if (var == kDummyPreParserVariable ||
var == kDummyPreParserLexicalVariable) {
continue;
}
bool local = !IsDynamicVariableMode(var->mode());
if ((locals ? local : !local) &&
(var->is_used() || !var->IsUnallocated())) {
@ -1550,6 +1638,9 @@ void Scope::Print(int n) {
}
PrintF(" { // (%d, %d)\n", start_position(), end_position());
if (is_hidden()) {
Indent(n1, "// is hidden\n");
}
// Function name, if any (named function literals only).
if (function != nullptr) {
@ -1836,7 +1927,6 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
#endif
DCHECK_NOT_NULL(var);
if (proxy->is_assigned()) var->set_maybe_assigned();
if (AccessNeedsHoleCheck(var, proxy, this)) proxy->set_needs_hole_check();
proxy->BindTo(var);
}
@ -1875,6 +1965,11 @@ void Scope::ResolveVariablesRecursively(ParseInfo* info) {
VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
ParseInfo* info,
VariableProxy* stack) {
// Module variables must be allocated before variable resolution
// to ensure that AccessNeedsHoleCheck() can detect import variables.
if (info != nullptr && is_module_scope()) {
AsModuleScope()->AllocateModuleVariables();
}
// Lazy parsed declaration scopes are already partially analyzed. If there are
// unresolved references remaining, they just need to be resolved in outer
// scopes.
@ -1901,6 +1996,9 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
} else {
var->set_is_used();
if (proxy->is_assigned()) {
var->set_maybe_assigned();
}
}
}
}
@ -1916,6 +2014,9 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
}
bool Scope::MustAllocate(Variable* var) {
if (var == kDummyPreParserLexicalVariable || var == kDummyPreParserVariable) {
return true;
}
DCHECK(var->location() != VariableLocation::MODULE);
// Give var a read/write use if there is a chance it might be accessed
// via an eval() call. This is only possible if the variable has a
@ -2091,7 +2192,8 @@ void ModuleScope::AllocateModuleVariables() {
void Scope::AllocateVariablesRecursively() {
DCHECK(!already_resolved_);
DCHECK_EQ(0, num_stack_slots_);
DCHECK_IMPLIES(!FLAG_preparser_scope_analysis, num_stack_slots_ == 0);
// Don't allocate variables of preparsed scopes.
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
return;
@ -2167,6 +2269,17 @@ void Scope::AllocateDebuggerScopeInfos(Isolate* isolate,
}
}
void Scope::CollectVariableData(PreParsedScopeData* data) {
PreParsedScopeData::ScopeScope scope_scope(data, scope_type(),
start_position(), end_position());
for (Variable* local : locals_) {
scope_scope.MaybeAddVariable(local);
}
for (Scope* inner = inner_scope_; inner != nullptr; inner = inner->sibling_) {
inner->CollectVariableData(data);
}
}
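
CollectVariableData above walks the scope tree through the first-child (inner_scope_) / next-sibling (sibling_) links. A standalone sketch of that traversal shape (ToyScope is illustrative, not V8's Scope):

#include <cstdio>

// First-child / next-sibling tree: each scope points at its first inner
// scope, and each inner scope points at the next sibling of its parent.
struct ToyScope {
  explicit ToyScope(const char* n) : name(n) {}
  const char* name;
  ToyScope* inner_scope = nullptr;  // first child
  ToyScope* sibling = nullptr;      // next child of this scope's parent

  void Collect(int depth) {
    printf("%*s%s\n", depth * 2, "", name);
    for (ToyScope* inner = inner_scope; inner != nullptr;
         inner = inner->sibling) {
      inner->Collect(depth + 1);
    }
  }
};

int main() {
  ToyScope script("script"), fn("function"), block("block");
  script.inner_scope = &fn;
  fn.sibling = &block;  // fn and block are both children of script
  script.Collect(0);
}
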
int Scope::StackLocalCount() const {
Variable* function =
is_function_scope() ? AsDeclarationScope()->function_var() : nullptr;

View File

@ -9,7 +9,6 @@
#include "src/base/hashmap.h"
#include "src/globals.h"
#include "src/objects.h"
#include "src/objects/scope-info.h"
#include "src/zone/zone.h"
namespace v8 {
@ -20,6 +19,7 @@ class AstValueFactory;
class AstRawString;
class Declaration;
class ParseInfo;
class PreParsedScopeData;
class SloppyBlockFunctionStatement;
class Statement;
class StringSet;
@ -39,7 +39,8 @@ class VariableMap: public ZoneHashMap {
// Records that "name" exists (if not recorded yet) but doesn't create a
// Variable. Useful for preparsing.
void DeclareName(Zone* zone, const AstRawString* name, VariableMode mode);
Variable* DeclareName(Zone* zone, const AstRawString* name,
VariableMode mode);
Variable* Lookup(const AstRawString* name);
void Remove(Variable* var);
@ -180,7 +181,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
bool* sloppy_mode_block_scope_function_redefinition,
bool* ok);
void DeclareVariableName(const AstRawString* name, VariableMode mode);
// The return value is meaningful only if FLAG_preparser_scope_analysis is on.
Variable* DeclareVariableName(const AstRawString* name, VariableMode mode);
// Declarations list.
ThreadedList<Declaration>* declarations() { return &decls_; }
@ -409,7 +411,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Scope* GetOuterScopeWithContext();
// Analyze() must have been called once to create the ScopeInfo.
Handle<ScopeInfo> scope_info() {
Handle<ScopeInfo> scope_info() const {
DCHECK(!scope_info_.is_null());
return scope_info_;
}
@ -481,6 +483,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// should also be invoked after resolution.
bool NeedsScopeInfo() const;
Variable* NewTemporary(const AstRawString* name,
MaybeAssignedFlag maybe_assigned);
Zone* zone_;
// Scope tree.
@ -586,6 +590,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void AllocateDebuggerScopeInfos(Isolate* isolate,
MaybeHandle<ScopeInfo> outer_scope);
void CollectVariableData(PreParsedScopeData* data);
// Construct a scope based on the scope info.
Scope(Zone* zone, ScopeType type, Handle<ScopeInfo> scope_info);
@ -605,7 +611,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
friend class ScopeTestHelper;
};
class DeclarationScope : public Scope {
class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
public:
DeclarationScope(Zone* zone, Scope* outer_scope, ScopeType scope_type,
FunctionKind function_kind = kNormalFunction);
@ -688,6 +694,11 @@ class DeclarationScope : public Scope {
bool is_optional, bool is_rest, bool* is_duplicate,
AstValueFactory* ast_value_factory);
// Declares that a parameter with the name exists. Creates a Variable and
// returns it if FLAG_preparser_scope_analysis is on.
Variable* DeclareParameterName(const AstRawString* name, bool is_rest,
AstValueFactory* ast_value_factory);
// Declare an implicit global variable in this scope which must be a
// script scope. The variable was introduced (possibly from an inner
// scope) by a reference to an unresolved variable with no intervening
@ -807,7 +818,8 @@ class DeclarationScope : public Scope {
// records variables which cannot be resolved inside the Scope (we don't yet
// know what they will resolve to since the outer Scopes are incomplete) and
// migrates them into migrate_to.
void AnalyzePartially(AstNodeFactory* ast_node_factory);
void AnalyzePartially(AstNodeFactory* ast_node_factory,
PreParsedScopeData* preparsed_scope_data);
Handle<StringSet> CollectNonLocals(ParseInfo* info,
Handle<StringSet> non_locals);
@ -887,8 +899,6 @@ class DeclarationScope : public Scope {
Variable* arguments_;
struct RareData : public ZoneObject {
void* operator new(size_t size, Zone* zone) { return zone->New(size); }
// Convenience variable; Subclass constructor only
Variable* this_function = nullptr;

View File

@ -37,9 +37,8 @@ Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global; they must always be allocated in the
// activation frame.
return (IsDynamicVariableMode(mode()) ||
(IsDeclaredVariableMode(mode()) && !IsLexicalVariableMode(mode()))) &&
scope_ != NULL && scope_->is_script_scope();
return (IsDynamicVariableMode(mode()) || mode() == VAR) &&
scope_ != nullptr && scope_->is_script_scope();
}
} // namespace internal

View File

@ -4,7 +4,7 @@
#include "src/background-parsing-task.h"
#include "src/debug/debug.h"
#include "src/objects-inl.h"
#include "src/parsing/parser.h"
namespace v8 {
@ -13,7 +13,6 @@ namespace internal {
void StreamedSource::Release() {
parser.reset();
info.reset();
zone.reset();
}
BackgroundParsingTask::BackgroundParsingTask(
@ -29,10 +28,8 @@ BackgroundParsingTask::BackgroundParsingTask(
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
Zone* zone = new Zone(isolate->allocator(), ZONE_NAME);
ParseInfo* info = new ParseInfo(zone);
ParseInfo* info = new ParseInfo(isolate->allocator());
info->set_toplevel();
source->zone.reset(zone);
source->info.reset(info);
info->set_isolate(isolate);
info->set_source_stream(source->source_stream.get());

View File

@ -38,7 +38,6 @@ struct StreamedSource {
// between parsing and compilation. These need to be initialized before the
// compilation starts.
UnicodeCache unicode_cache;
std::unique_ptr<Zone> zone;
std::unique_ptr<ParseInfo> info;
std::unique_ptr<Parser> parser;

View File

@ -186,6 +186,7 @@ namespace internal {
"Sloppy function expects JSReceiver as receiver.") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
V(kSpreadCall, "Call with spread argument") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kSuperReference, "Super reference") \
@ -212,14 +213,10 @@ namespace internal {
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
"Unexpected fallthrough from CharCodeAt slow case") \
V(kUnexpectedFallthroughFromCharFromCodeSlowCase, \
"Unexpected fallthrough from CharFromCode slow case") \
V(kUnexpectedFallThroughFromStringComparison, \
"Unexpected fall-through from string comparison") \
V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
"Unexpected fallthrough to CharCodeAt slow case") \
V(kUnexpectedFallthroughToCharFromCodeSlowCase, \
"Unexpected fallthrough to CharFromCode slow case") \
V(kUnexpectedFPUStackDepthAfterInstruction, \
"Unexpected FPU stack depth after instruction") \
V(kUnexpectedInitialMapForArrayFunction1, \
@ -252,6 +249,8 @@ namespace internal {
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const or let variable") \
V(kUnexpectedReturnFromFrameDropper, \
"Unexpectedly returned from dropping frames") \
V(kUnexpectedReturnFromThrow, "Unexpectedly returned from a throw") \
V(kUnsupportedSwitchStatement, "Unsupported switch statement") \
V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \

View File

@ -31,7 +31,9 @@ class AtomicNumber {
&value_, -static_cast<base::AtomicWord>(decrement)));
}
V8_INLINE T Value() { return static_cast<T>(base::Acquire_Load(&value_)); }
V8_INLINE T Value() const {
return static_cast<T>(base::Acquire_Load(&value_));
}
V8_INLINE void SetValue(T new_value) {
base::Release_Store(&value_, static_cast<base::AtomicWord>(new_value));
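
The const-qualified Value() keeps the Acquire_Load that pairs with the Release_Store in SetValue. In standard C++ the same release-acquire handoff looks like this (a sketch, not V8's base library):

#include <atomic>
#include <cassert>
#include <thread>

// A release store "publishes" earlier plain writes; an acquire load that
// observes the store is guaranteed to see those writes too.
static int payload = 0;
static std::atomic<bool> ready{false};

int main() {
  std::thread producer([] {
    payload = 42;                                  // plain write...
    ready.store(true, std::memory_order_release);  // ...published by release
  });
  std::thread consumer([] {
    while (!ready.load(std::memory_order_acquire)) {
    }                       // acquire pairs with the release store
    assert(payload == 42);  // visible after the acquire load succeeds
  });
  producer.join();
  consumer.join();
}
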

View File

@ -40,6 +40,11 @@ class TemplateHashMapImpl {
MatchFun match = MatchFun(),
AllocationPolicy allocator = AllocationPolicy());
// Clones the given hashmap and creates a copy with the same entries.
TemplateHashMapImpl(const TemplateHashMapImpl<Key, Value, MatchFun,
AllocationPolicy>* original,
AllocationPolicy allocator = AllocationPolicy());
~TemplateHashMapImpl();
// If an entry with matching key is found, returns that entry.
@ -119,6 +124,8 @@ class TemplateHashMapImpl {
uint32_t hash,
AllocationPolicy allocator = AllocationPolicy());
void Resize(AllocationPolicy allocator);
DISALLOW_COPY_AND_ASSIGN(TemplateHashMapImpl);
};
template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
@ -129,6 +136,19 @@ TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::
Initialize(initial_capacity, allocator);
}
template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
TemplateHashMapImpl<Key, Value, MatchFun, AllocationPolicy>::
TemplateHashMapImpl(const TemplateHashMapImpl<Key, Value, MatchFun,
AllocationPolicy>* original,
AllocationPolicy allocator)
: capacity_(original->capacity_),
occupancy_(original->occupancy_),
match_(original->match_) {
map_ = reinterpret_cast<Entry*>(allocator.New(capacity_ * sizeof(Entry)));
memcpy(map_, original->map_, capacity_ * sizeof(Entry));
}
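
This clone constructor copies the bucket array with a single memcpy, which is sound only while Entry holds trivially copyable keys and values (raw pointers here); the DISALLOW_COPY_AND_ASSIGN below keeps the implicit copy path from bypassing that contract. A reduced sketch of the same clone-by-memcpy idea (ToyMap is illustrative, not the V8 template):

#include <cstdint>
#include <cstdlib>
#include <cstring>

struct Entry {
  void* key;
  void* value;
  uint32_t hash;
};  // trivially copyable, so memcpy is a valid clone

struct ToyMap {
  Entry* map;
  uint32_t capacity;

  explicit ToyMap(uint32_t cap) : capacity(cap) {
    map = static_cast<Entry*>(calloc(capacity, sizeof(Entry)));
  }
  // Clone: copy the bucket array bit-for-bit into independent storage.
  ToyMap(const ToyMap& original) : capacity(original.capacity) {
    map = static_cast<Entry*>(malloc(capacity * sizeof(Entry)));
    memcpy(map, original.map, capacity * sizeof(Entry));
  }
  ~ToyMap() { free(map); }
  ToyMap& operator=(const ToyMap&) = delete;
};

int main() {
  ToyMap a(8);
  a.map[0].hash = 7;
  ToyMap b(a);  // same contents, no shared storage
  return b.map[0].hash == 7 ? 0 : 1;
}
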
template <typename Key, typename Value, typename MatchFun,
class AllocationPolicy>
TemplateHashMapImpl<Key, Value, MatchFun,
@ -382,6 +402,14 @@ class CustomMatcherTemplateHashMapImpl
AllocationPolicy allocator = AllocationPolicy())
: Base(capacity, HashEqualityThenKeyMatcher<void*, MatchFun>(match),
allocator) {}
CustomMatcherTemplateHashMapImpl(
const CustomMatcherTemplateHashMapImpl<AllocationPolicy>* original,
AllocationPolicy allocator = AllocationPolicy())
: Base(original, allocator) {}
private:
DISALLOW_COPY_AND_ASSIGN(CustomMatcherTemplateHashMapImpl);
};
typedef CustomMatcherTemplateHashMapImpl<DefaultAllocationPolicy>

View File

@ -43,13 +43,13 @@ namespace base {
//
// We make sure CHECK et al. always evaluates their arguments, as
// doing CHECK(FunctionWithSideEffect()) is a common idiom.
#define CHECK(condition) \
#define CHECK_WITH_MSG(condition, message) \
do { \
if (V8_UNLIKELY(!(condition))) { \
V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", #condition); \
V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", message); \
} \
} while (0)
#define CHECK(condition) CHECK_WITH_MSG(condition, #condition)
#ifdef DEBUG
@ -70,7 +70,12 @@ namespace base {
// Make all CHECK functions discard their log strings to reduce code
// bloat for official release builds.
#define CHECK_OP(name, op, lhs, rhs) CHECK((lhs)op(rhs))
#define CHECK_OP(name, op, lhs, rhs) \
do { \
bool _cmp = \
::v8::base::Cmp##name##Impl<decltype(lhs), decltype(rhs)>(lhs, rhs); \
CHECK_WITH_MSG(_cmp, #lhs " " #op " " #rhs); \
} while (0)
#endif
@ -199,7 +204,8 @@ DEFINE_CHECK_OP_IMPL(GT, > )
#define CHECK_GT(lhs, rhs) CHECK_OP(GT, >, lhs, rhs)
#define CHECK_NULL(val) CHECK((val) == nullptr)
#define CHECK_NOT_NULL(val) CHECK((val) != nullptr)
#define CHECK_IMPLIES(lhs, rhs) CHECK(!(lhs) || (rhs))
#define CHECK_IMPLIES(lhs, rhs) \
CHECK_WITH_MSG(!(lhs) || (rhs), #lhs " implies " #rhs)
} // namespace base
} // namespace v8
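
Factoring CHECK through CHECK_WITH_MSG lets CHECK_OP and CHECK_IMPLIES report a readable "lhs op rhs" message while each operand is still evaluated exactly once. A standalone sketch of the pattern (the MY_* names and CmpEQImpl helper are illustrative, not the v8::base macros):

#include <cstdio>
#include <cstdlib>

#define MY_CHECK_WITH_MSG(condition, message)          \
  do {                                                 \
    if (!(condition)) {                                \
      fprintf(stderr, "Check failed: %s.\n", message); \
      abort();                                         \
    }                                                  \
  } while (0)

#define MY_CHECK(condition) MY_CHECK_WITH_MSG(condition, #condition)

// Comparing through a helper evaluates each operand exactly once.
template <typename Lhs, typename Rhs>
bool CmpEQImpl(Lhs lhs, Rhs rhs) {
  return lhs == rhs;
}

#define MY_CHECK_EQ(lhs, rhs)                  \
  do {                                         \
    bool _cmp = CmpEQImpl((lhs), (rhs));       \
    MY_CHECK_WITH_MSG(_cmp, #lhs " == " #rhs); \
  } while (0)

int main() {
  int calls = 0;
  auto next = [&] { return ++calls; };
  MY_CHECK(next() == 1);  // argument evaluated, exactly once
  MY_CHECK_EQ(calls, 1);
  MY_CHECK_EQ(calls, 2);  // fails: prints `Check failed: calls == 2.`
}
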

View File

@ -620,12 +620,15 @@ void Thread::Start() {
result = pthread_attr_init(&attr);
DCHECK_EQ(0, result);
size_t stack_size = stack_size_;
#if V8_OS_AIX
if (stack_size == 0) {
// Default on AIX is 96KB -- bump up to 2MB
#if V8_OS_MACOSX
// Default on Mac OS X is 512kB -- bump up to 1MB
stack_size = 1 * 1024 * 1024;
#elif V8_OS_AIX
// Default on AIX is 96kB -- bump up to 2MB
stack_size = 2 * 1024 * 1024;
}
#endif
}
if (stack_size > 0) {
result = pthread_attr_setstacksize(&attr, stack_size);
DCHECK_EQ(0, result);
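
The restructured Thread::Start applies a platform-specific minimum only when the caller did not request an explicit stack size. A standalone sketch of that pthread pattern (assuming POSIX; not V8's platform layer):

#include <pthread.h>
#include <cstdio>

static void* ThreadMain(void*) {
  // Deep recursion / large frames are safe up to the configured stack size.
  return nullptr;
}

int main() {
  pthread_attr_t attr;
  pthread_attr_init(&attr);

  size_t stack_size = 0;  // 0 means the caller expressed no preference
  if (stack_size == 0) {
#if defined(__APPLE__)
    stack_size = 1 * 1024 * 1024;  // macOS default is 512kB; bump to 1MB
#elif defined(_AIX)
    stack_size = 2 * 1024 * 1024;  // AIX default is 96kB; bump to 2MB
#endif
  }
  if (stack_size > 0) pthread_attr_setstacksize(&attr, stack_size);

  pthread_t thread;
  pthread_create(&thread, &attr, ThreadMain, nullptr);
  pthread_join(thread, nullptr);
  pthread_attr_destroy(&attr);
}
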

View File

@ -9,12 +9,14 @@
#include "src/base/ieee754.h"
#include "src/code-stubs.h"
#include "src/compiler.h"
#include "src/debug/debug.h"
#include "src/extensions/externalize-string-extension.h"
#include "src/extensions/free-buffer-extension.h"
#include "src/extensions/gc-extension.h"
#include "src/extensions/ignition-statistics-extension.h"
#include "src/extensions/statistics-extension.h"
#include "src/extensions/trigger-failure-extension.h"
#include "src/ffi/ffi-compiler.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/snapshot/natives.h"
@ -173,6 +175,7 @@ class Genesis BASE_EMBEDDED {
void CreateStrictModeFunctionMaps(Handle<JSFunction> empty);
void CreateIteratorMaps(Handle<JSFunction> empty);
void CreateAsyncIteratorMaps();
void CreateAsyncFunctionMaps(Handle<JSFunction> empty);
void CreateJSProxyMaps();
@ -216,6 +219,8 @@ class Genesis BASE_EMBEDDED {
HARMONY_SHIPPING(DECLARE_FEATURE_INITIALIZATION)
#undef DECLARE_FEATURE_INITIALIZATION
void InitializeGlobal_enable_fast_array_builtins();
Handle<JSFunction> InstallArrayBuffer(Handle<JSObject> target,
const char* name, Builtins::Name call,
BuiltinFunctionId id);
@ -362,7 +367,6 @@ void InstallFunction(Handle<JSObject> target, Handle<Name> property_name,
if (target->IsJSGlobalObject()) {
function->shared()->set_instance_class_name(*function_name);
}
function->shared()->set_native(true);
}
void InstallFunction(Handle<JSObject> target, Handle<JSFunction> function,
@ -380,11 +384,14 @@ Handle<JSFunction> CreateFunction(Isolate* isolate, Handle<String> name,
Factory* factory = isolate->factory();
Handle<Code> call_code(isolate->builtins()->builtin(call));
Handle<JSObject> prototype;
return maybe_prototype.ToHandle(&prototype)
Handle<JSFunction> result =
maybe_prototype.ToHandle(&prototype)
? factory->NewFunction(name, call_code, prototype, type,
instance_size, strict_function_map)
: factory->NewFunctionWithoutPrototype(name, call_code,
strict_function_map);
result->shared()->set_native(true);
return result;
}
Handle<JSFunction> InstallFunction(Handle<JSObject> target, Handle<Name> name,
@ -468,14 +475,12 @@ void SimpleInstallGetterSetter(Handle<JSObject> base, Handle<String> name,
.ToHandleChecked();
Handle<JSFunction> getter =
SimpleCreateFunction(isolate, getter_name, call_getter, 0, true);
getter->shared()->set_native(true);
Handle<String> setter_name =
Name::ToFunctionName(name, isolate->factory()->set_string())
.ToHandleChecked();
Handle<JSFunction> setter =
SimpleCreateFunction(isolate, setter_name, call_setter, 1, true);
setter->shared()->set_native(true);
JSObject::DefineAccessor(base, name, getter, setter, attribs).Check();
}
@ -491,7 +496,6 @@ Handle<JSFunction> SimpleInstallGetter(Handle<JSObject> base,
.ToHandleChecked();
Handle<JSFunction> getter =
SimpleCreateFunction(isolate, getter_name, call, 0, adapt);
getter->shared()->set_native(true);
Handle<Object> setter = isolate->factory()->undefined_value();
@ -721,7 +725,6 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
Handle<JSFunction> iterator_prototype_iterator = SimpleCreateFunction(
isolate(), factory()->NewStringFromAsciiChecked("[Symbol.iterator]"),
Builtins::kReturnReceiver, 0, true);
iterator_prototype_iterator->shared()->set_native(true);
JSObject::AddProperty(iterator_prototype, factory()->iterator_symbol(),
iterator_prototype_iterator, DONT_ENUM);
@ -760,10 +763,12 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
SimpleInstallFunction(generator_object_prototype, "throw",
Builtins::kGeneratorPrototypeThrow, 1, true);
// Internal version of generator_prototype_next, flagged as non-native.
// Internal version of generator_prototype_next, flagged as non-native so
// that it doesn't show up in Error traces.
Handle<JSFunction> generator_next_internal =
SimpleCreateFunction(isolate(), factory()->next_string(),
Builtins::kGeneratorPrototypeNext, 1, true);
generator_next_internal->shared()->set_native(false);
native_context()->set_generator_next_internal(*generator_next_internal);
// Create maps for generator functions and their prototypes. Store those
@ -785,6 +790,50 @@ void Genesis::CreateIteratorMaps(Handle<JSFunction> empty) {
*generator_object_prototype_map);
}
void Genesis::CreateAsyncIteratorMaps() {
// %AsyncIteratorPrototype%
// proposal-async-iteration/#sec-asynciteratorprototype
Handle<JSObject> async_iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Handle<JSFunction> async_iterator_prototype_iterator = SimpleCreateFunction(
isolate(), factory()->NewStringFromAsciiChecked("[Symbol.asyncIterator]"),
Builtins::kReturnReceiver, 0, true);
JSObject::AddProperty(async_iterator_prototype,
factory()->async_iterator_symbol(),
async_iterator_prototype_iterator, DONT_ENUM);
// %AsyncFromSyncIteratorPrototype%
// proposal-async-iteration/#sec-%asyncfromsynciteratorprototype%-object
Handle<JSObject> async_from_sync_iterator_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
SimpleInstallFunction(async_from_sync_iterator_prototype,
factory()->next_string(),
Builtins::kAsyncFromSyncIteratorPrototypeNext, 1, true);
SimpleInstallFunction(
async_from_sync_iterator_prototype, factory()->return_string(),
Builtins::kAsyncFromSyncIteratorPrototypeReturn, 1, true);
SimpleInstallFunction(
async_from_sync_iterator_prototype, factory()->throw_string(),
Builtins::kAsyncFromSyncIteratorPrototypeThrow, 1, true);
JSObject::AddProperty(
async_from_sync_iterator_prototype, factory()->to_string_tag_symbol(),
factory()->NewStringFromAsciiChecked("Async-from-Sync Iterator"),
static_cast<PropertyAttributes>(DONT_ENUM | READ_ONLY));
JSObject::ForceSetPrototype(async_from_sync_iterator_prototype,
async_iterator_prototype);
Handle<Map> async_from_sync_iterator_map = factory()->NewMap(
JS_ASYNC_FROM_SYNC_ITERATOR_TYPE, JSAsyncFromSyncIterator::kSize);
Map::SetPrototype(async_from_sync_iterator_map,
async_from_sync_iterator_prototype);
native_context()->set_async_from_sync_iterator_map(
*async_from_sync_iterator_map);
}
void Genesis::CreateAsyncFunctionMaps(Handle<JSFunction> empty) {
// %AsyncFunctionPrototype% intrinsic
Handle<JSObject> async_function_prototype =
@ -1295,6 +1344,16 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
class_function_map_->SetConstructor(*function_fun);
}
{
// --- A s y n c F r o m S y n c I t e r a t o r
Handle<Code> code = isolate->builtins()->AsyncIteratorValueUnwrap();
Handle<SharedFunctionInfo> info =
factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
info->set_length(1);
native_context()->set_async_iterator_value_unwrap_shared_fun(*info);
}
{ // --- A r r a y ---
Handle<JSFunction> array_function =
InstallFunction(global, "Array", JS_ARRAY_TYPE, JSArray::kSize,
@ -1371,6 +1430,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate, factory->ArrayIterator_string(),
JS_FAST_ARRAY_VALUE_ITERATOR_TYPE, JSArrayIterator::kSize,
array_iterator_prototype, Builtins::kIllegal);
array_iterator_function->shared()->set_native(false);
array_iterator_function->shared()->set_instance_class_name(
isolate->heap()->ArrayIterator_string());
@ -1585,6 +1645,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeLocaleCompare, 1, true);
SimpleInstallFunction(prototype, "normalize",
Builtins::kStringPrototypeNormalize, 0, false);
SimpleInstallFunction(prototype, "replace",
Builtins::kStringPrototypeReplace, 2, true);
SimpleInstallFunction(prototype, "split", Builtins::kStringPrototypeSplit,
2, true);
SimpleInstallFunction(prototype, "substr", Builtins::kStringPrototypeSubstr,
2, true);
SimpleInstallFunction(prototype, "substring",
@ -1599,13 +1663,22 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtins::kStringPrototypeTrimLeft, 0, false);
SimpleInstallFunction(prototype, "trimRight",
Builtins::kStringPrototypeTrimRight, 0, false);
SimpleInstallFunction(prototype, "toLocaleLowerCase",
Builtins::kStringPrototypeToLocaleLowerCase, 0,
false);
SimpleInstallFunction(prototype, "toLocaleUpperCase",
Builtins::kStringPrototypeToLocaleUpperCase, 0,
false);
SimpleInstallFunction(prototype, "toLowerCase",
Builtins::kStringPrototypeToLowerCase, 0, false);
SimpleInstallFunction(prototype, "toUpperCase",
Builtins::kStringPrototypeToUpperCase, 0, false);
SimpleInstallFunction(prototype, "valueOf",
Builtins::kStringPrototypeValueOf, 0, true);
Handle<JSFunction> iterator = SimpleCreateFunction(
isolate, factory->NewStringFromAsciiChecked("[Symbol.iterator]"),
Builtins::kStringPrototypeIterator, 0, true);
iterator->shared()->set_native(true);
iterator->shared()->set_builtin_function_id(kStringIterator);
JSObject::AddProperty(prototype, factory->iterator_symbol(), iterator,
static_cast<PropertyAttributes>(DONT_ENUM));
@ -1641,6 +1714,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate, factory->NewStringFromAsciiChecked("StringIterator"),
JS_STRING_ITERATOR_TYPE, JSStringIterator::kSize,
string_iterator_prototype, Builtins::kIllegal);
string_iterator_function->shared()->set_native(false);
native_context()->set_string_iterator_map(
string_iterator_function->initial_map());
}
@ -1923,6 +1997,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kPromiseInternalConstructor, 1, true);
function->shared()->set_native(false);
InstallWithIntrinsicDefaultProto(
isolate, function, Context::PROMISE_INTERNAL_CONSTRUCTOR_INDEX);
}
@ -1934,18 +2009,11 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::IS_PROMISE_INDEX);
}
{ // Internal: PerformPromiseThen
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kPerformPromiseThen, 4, false);
InstallWithIntrinsicDefaultProto(isolate, function,
Context::PERFORM_PROMISE_THEN_INDEX);
}
{ // Internal: ResolvePromise
// Also exposed as extrasUtils.resolvePromise.
Handle<JSFunction> function = SimpleCreateFunction(
isolate, factory->empty_string(), Builtins::kResolvePromise, 2, true);
function->shared()->set_native(false);
InstallWithIntrinsicDefaultProto(isolate, function,
Context::PROMISE_RESOLVE_INDEX);
}
@ -1975,6 +2043,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kInternalPromiseReject, 3, true);
function->shared()->set_native(false);
InstallWithIntrinsicDefaultProto(isolate, function,
Context::PROMISE_INTERNAL_REJECT_INDEX);
}
@ -2191,6 +2260,15 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
native_context()->set_regexp_last_match_info(*last_match_info);
Handle<RegExpMatchInfo> internal_match_info = factory->NewRegExpMatchInfo();
native_context()->set_regexp_internal_match_info(*internal_match_info);
// Force the RegExp constructor to fast properties, so that we can use the
// fast paths for various things like
//
// x instanceof RegExp
//
// etc. We should probably come up with a more principled approach once
// the JavaScript builtins are gone.
JSObject::MigrateSlowToFast(regexp_fun, 0, "Bootstrapping");
}
{ // -- E r r o r
@ -2441,6 +2519,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
CreateFunction(isolate, factory->InternalizeUtf8String("TypedArray"),
JS_TYPED_ARRAY_TYPE, JSTypedArray::kSize, prototype,
Builtins::kIllegal);
typed_array_fun->shared()->set_native(false);
InstallSpeciesGetter(typed_array_fun);
// Install the "constructor" property on the {prototype}.
@ -2479,6 +2558,10 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
values->shared()->set_builtin_function_id(kTypedArrayValues);
JSObject::AddProperty(prototype, factory->iterator_symbol(), values,
DONT_ENUM);
// TODO(caitp): alphasort accessors/methods
SimpleInstallFunction(prototype, "copyWithin",
Builtins::kTypedArrayPrototypeCopyWithin, 2, false);
}
{ // -- T y p e d A r r a y s
@ -2909,6 +2992,8 @@ void Genesis::InitializeExperimentalGlobal() {
HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
#undef FEATURE_INITIALIZE_GLOBAL
InitializeGlobal_enable_fast_array_builtins();
}
@ -2979,7 +3064,7 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
// environment has been at least partially initialized. Add a stack check
// before entering JS code to catch overflow early.
StackLimitCheck check(isolate);
if (check.JsHasOverflowed(1 * KB)) {
if (check.JsHasOverflowed(4 * KB)) {
isolate->StackOverflow();
return false;
}
@ -2991,8 +3076,7 @@ bool Bootstrapper::CompileNative(Isolate* isolate, Vector<const char> name,
Handle<SharedFunctionInfo> function_info =
Compiler::GetSharedFunctionInfoForScript(
source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag,
false);
context, NULL, NULL, ScriptCompiler::kNoCompileOptions, natives_flag);
if (function_info.is_null()) return false;
DCHECK(context->IsNativeContext());
@ -3055,7 +3139,7 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
function_info = Compiler::GetSharedFunctionInfoForScript(
source, script_name, 0, 0, ScriptOriginOptions(), Handle<Object>(),
context, extension, NULL, ScriptCompiler::kNoCompileOptions,
EXTENSION_CODE, false);
EXTENSION_CODE);
if (function_info.is_null()) return false;
cache->Add(name, function_info);
}
@ -3131,6 +3215,8 @@ void Genesis::ConfigureUtilsObject(GlobalContextType context_type) {
// The utils object can be removed for cases that reach this point.
native_context()->set_natives_utils_object(heap()->undefined_value());
native_context()->set_extras_utils_object(heap()->undefined_value());
native_context()->set_exports_container(heap()->undefined_value());
}
@ -3350,8 +3436,10 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
script_source_mapping_url, attribs);
script_map->AppendDescriptor(&d);
}
}
{
{ // -- A s y n c F u n c t i o n
// Builtin functions for AsyncFunction.
PrototypeIterator iter(native_context->async_function_map());
Handle<JSObject> async_function_prototype(iter.GetCurrent<JSObject>());
@ -3377,14 +3465,56 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
JSFunction::SetPrototype(async_function_constructor,
async_function_prototype);
Handle<JSFunction> async_function_next =
SimpleInstallFunction(container, "AsyncFunctionNext",
Builtins::kGeneratorPrototypeNext, 1, true);
Handle<JSFunction> async_function_throw =
SimpleInstallFunction(container, "AsyncFunctionThrow",
Builtins::kGeneratorPrototypeThrow, 1, true);
async_function_next->shared()->set_native(false);
async_function_throw->shared()->set_native(false);
{
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kAsyncFunctionAwaitCaught, 3, false);
InstallWithIntrinsicDefaultProto(
isolate, function, Context::ASYNC_FUNCTION_AWAIT_CAUGHT_INDEX);
}
{
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kAsyncFunctionAwaitUncaught, 3, false);
InstallWithIntrinsicDefaultProto(
isolate, function, Context::ASYNC_FUNCTION_AWAIT_UNCAUGHT_INDEX);
}
{
Handle<Code> code =
isolate->builtins()->AsyncFunctionAwaitRejectClosure();
Handle<SharedFunctionInfo> info =
factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
info->set_length(1);
native_context->set_async_function_await_reject_shared_fun(*info);
}
{
Handle<Code> code =
isolate->builtins()->AsyncFunctionAwaitResolveClosure();
Handle<SharedFunctionInfo> info =
factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
info->set_length(1);
native_context->set_async_function_await_resolve_shared_fun(*info);
}
{
Handle<JSFunction> function =
SimpleCreateFunction(isolate, factory->empty_string(),
Builtins::kAsyncFunctionPromiseCreate, 0, false);
InstallWithIntrinsicDefaultProto(
isolate, function, Context::ASYNC_FUNCTION_PROMISE_CREATE_INDEX);
}
{
Handle<JSFunction> function = SimpleCreateFunction(
isolate, factory->empty_string(),
Builtins::kAsyncFunctionPromiseRelease, 1, false);
InstallWithIntrinsicDefaultProto(
isolate, function, Context::ASYNC_FUNCTION_PROMISE_RELEASE_INDEX);
}
}
@ -3442,24 +3572,7 @@ void Bootstrapper::ExportFromRuntime(Isolate* isolate,
Accessors::FunctionSetPrototype(callsite_fun, proto).Assert();
}
}
}
void Bootstrapper::ExportExperimentalFromRuntime(Isolate* isolate,
Handle<JSObject> container) {
HandleScope scope(isolate);
#ifdef V8_I18N_SUPPORT
#define INITIALIZE_FLAG(FLAG) \
{ \
Handle<String> name = \
isolate->factory()->NewStringFromAsciiChecked(#FLAG); \
JSObject::AddProperty(container, name, \
isolate->factory()->ToBoolean(FLAG), NONE); \
}
#undef INITIALIZE_FLAG
#endif
isolate->native_context()->set_exports_container(*container);
}
@ -3472,14 +3585,13 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_named_captures)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_regexp_property)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_sent)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tailcalls)
#ifdef V8_I18N_SUPPORT
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(datetime_format_to_parts)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(icu_case_mapping)
#endif
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_restrictive_generators)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_trailing_commas)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_function_tostring)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_class_fields)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_spread)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_rest_spread)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_dynamic_import)
EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_template_escapes)
void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
const char* name, Handle<Symbol> value) {
@ -3494,6 +3606,31 @@ void InstallPublicSymbol(Factory* factory, Handle<Context> native_context,
JSObject::AddProperty(symbol, name_string, value, attributes);
}
void Genesis::InitializeGlobal_enable_fast_array_builtins() {
if (!FLAG_enable_fast_array_builtins) return;
Handle<JSGlobalObject> global(native_context()->global_object());
Isolate* isolate = global->GetIsolate();
Factory* factory = isolate->factory();
LookupIterator it1(global, factory->NewStringFromAsciiChecked("Array"),
LookupIterator::OWN_SKIP_INTERCEPTOR);
Handle<Object> array_object = Object::GetProperty(&it1).ToHandleChecked();
LookupIterator it2(array_object,
factory->NewStringFromAsciiChecked("prototype"),
LookupIterator::OWN_SKIP_INTERCEPTOR);
Handle<Object> array_prototype = Object::GetProperty(&it2).ToHandleChecked();
LookupIterator it3(array_prototype,
factory->NewStringFromAsciiChecked("forEach"),
LookupIterator::OWN_SKIP_INTERCEPTOR);
Handle<Object> for_each_function =
Object::GetProperty(&it3).ToHandleChecked();
Handle<JSFunction>::cast(for_each_function)
->set_code(isolate->builtins()->builtin(Builtins::kArrayForEach));
Handle<JSFunction>::cast(for_each_function)
->shared()
->set_code(isolate->builtins()->builtin(Builtins::kArrayForEach));
}
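
The three chained lookups above resolve global → "Array" → "prototype" → "forEach" one own property at a time, and both code fields are patched: the JSFunction's code is what already-created closures run, while the SharedFunctionInfo's code is picked up by closures created later. A minimal sketch of the same chain, assuming a hypothetical ResolveOwnPath helper (not part of V8):

Handle<Object> ResolveOwnPath(Isolate* isolate, Handle<Object> start,
                              std::initializer_list<const char*> path) {
  // Repeats the LookupIterator pattern used above for each segment.
  Factory* factory = isolate->factory();
  Handle<Object> current = start;
  for (const char* name : path) {
    LookupIterator it(current, factory->NewStringFromAsciiChecked(name),
                      LookupIterator::OWN_SKIP_INTERCEPTOR);
    current = Object::GetProperty(&it).ToHandleChecked();
  }
  return current;
}

With it, the lookup portion of the body above reduces to ResolveOwnPath(isolate, global, {"Array", "prototype", "forEach"}).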
void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
if (!FLAG_harmony_sharedarraybuffer) return;
@ -3523,38 +3660,6 @@ void Genesis::InitializeGlobal_harmony_sharedarraybuffer() {
Builtins::kAtomicsStore, 3, true);
}
void Genesis::InitializeGlobal_harmony_simd() {
if (!FLAG_harmony_simd) return;
Handle<JSGlobalObject> global(
JSGlobalObject::cast(native_context()->global_object()));
Isolate* isolate = global->GetIsolate();
Factory* factory = isolate->factory();
Handle<String> name = factory->InternalizeUtf8String("SIMD");
Handle<JSFunction> cons = factory->NewFunction(name);
JSFunction::SetInstancePrototype(
cons,
Handle<Object>(native_context()->initial_object_prototype(), isolate));
cons->shared()->set_instance_class_name(*name);
Handle<JSObject> simd_object = factory->NewJSObject(cons, TENURED);
DCHECK(simd_object->IsJSObject());
JSObject::AddProperty(global, name, simd_object, DONT_ENUM);
// Install SIMD type functions. Set the instance class names since
// InstallFunction only does this when we install on the JSGlobalObject.
#define SIMD128_INSTALL_FUNCTION(TYPE, Type, type, lane_count, lane_type) \
Handle<JSFunction> type##_function = InstallFunction( \
simd_object, #Type, JS_VALUE_TYPE, JSValue::kSize, \
isolate->initial_object_prototype(), Builtins::kIllegal); \
native_context()->set_##type##_function(*type##_function); \
type##_function->shared()->set_instance_class_name(*factory->Type##_string());
SIMD128_TYPES(SIMD128_INSTALL_FUNCTION)
#undef SIMD128_INSTALL_FUNCTION
}
void Genesis::InitializeGlobal_harmony_array_prototype_values() {
if (!FLAG_harmony_array_prototype_values) return;
Handle<JSFunction> array_constructor(native_context()->array_function());
@ -3576,6 +3681,143 @@ void Genesis::InitializeGlobal_harmony_array_prototype_values() {
NONE);
}
void Genesis::InitializeGlobal_harmony_async_iteration() {
if (!FLAG_harmony_async_iteration) return;
Handle<JSFunction> symbol_fun(native_context()->symbol_function());
InstallConstant(isolate(), symbol_fun, "asyncIterator",
factory()->async_iterator_symbol());
}
void Genesis::InitializeGlobal_harmony_promise_finally() {
if (!FLAG_harmony_promise_finally) return;
Handle<JSFunction> constructor(native_context()->promise_function());
Handle<JSObject> prototype(JSObject::cast(constructor->instance_prototype()));
SimpleInstallFunction(prototype, "finally", Builtins::kPromiseFinally, 1,
true, DONT_ENUM);
// The promise prototype map has changed because we added a property
// to prototype, so we update the saved map.
Handle<Map> prototype_map(prototype->map());
Map::SetShouldBeFastPrototypeMap(prototype_map, true, isolate());
native_context()->set_promise_prototype_map(*prototype_map);
{
Handle<Code> code =
handle(isolate()->builtins()->builtin(Builtins::kPromiseThenFinally),
isolate());
Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
factory()->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
info->set_length(1);
info->set_native(true);
native_context()->set_promise_then_finally_shared_fun(*info);
}
{
Handle<Code> code =
handle(isolate()->builtins()->builtin(Builtins::kPromiseCatchFinally),
isolate());
Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
factory()->empty_string(), code, false);
info->set_internal_formal_parameter_count(1);
info->set_length(1);
info->set_native(true);
native_context()->set_promise_catch_finally_shared_fun(*info);
}
{
Handle<Code> code = handle(
isolate()->builtins()->builtin(Builtins::kPromiseValueThunkFinally),
isolate());
Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
factory()->empty_string(), code, false);
info->set_internal_formal_parameter_count(0);
info->set_length(0);
native_context()->set_promise_value_thunk_finally_shared_fun(*info);
}
{
Handle<Code> code =
handle(isolate()->builtins()->builtin(Builtins::kPromiseThrowerFinally),
isolate());
Handle<SharedFunctionInfo> info = factory()->NewSharedFunctionInfo(
factory()->empty_string(), code, false);
info->set_internal_formal_parameter_count(0);
info->set_length(0);
native_context()->set_promise_thrower_finally_shared_fun(*info);
}
}
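
The four blocks above repeat one pattern: wrap a builtin Code object in a SharedFunctionInfo whose length and formal parameter count describe the closure that will later be created from it (1 for the then/catch handlers, 0 for the value thunk and the thrower; only the first two are marked native). A hedged sketch of that pattern as a hypothetical helper, using only calls that appear verbatim above:

Handle<SharedFunctionInfo> MakeFinallyClosureInfo(Factory* factory,
                                                  Handle<Code> code,
                                                  int param_count,
                                                  bool native) {
  // Anonymous SharedFunctionInfo backing a runtime-created closure.
  Handle<SharedFunctionInfo> info =
      factory->NewSharedFunctionInfo(factory->empty_string(), code, false);
  info->set_internal_formal_parameter_count(param_count);
  info->set_length(param_count);
  if (native) info->set_native(true);
  return info;
}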
#ifdef V8_I18N_SUPPORT
void Genesis::InitializeGlobal_datetime_format_to_parts() {
if (!FLAG_datetime_format_to_parts) return;
Handle<JSReceiver> exports_container(
JSReceiver::cast(native_context()->exports_container()));
Handle<JSObject> date_time_format_prototype(JSObject::cast(
native_context()->intl_date_time_format_function()->prototype()));
Handle<JSFunction> format_date_to_parts = Handle<JSFunction>::cast(
JSReceiver::GetProperty(
exports_container,
factory()->InternalizeUtf8String("FormatDateToParts"))
.ToHandleChecked());
InstallFunction(date_time_format_prototype, format_date_to_parts,
factory()->InternalizeUtf8String("formatToParts"));
}
namespace {
void SetFunction(Handle<JSObject> target, Handle<JSFunction> function,
Handle<Name> name, PropertyAttributes attributes = DONT_ENUM) {
JSObject::SetOwnPropertyIgnoreAttributes(target, name, function, attributes)
.ToHandleChecked();
}
} // namespace
void Genesis::InitializeGlobal_icu_case_mapping() {
if (!FLAG_icu_case_mapping) return;
Handle<JSReceiver> exports_container(
JSReceiver::cast(native_context()->exports_container()));
Handle<JSObject> string_prototype(
JSObject::cast(native_context()->string_function()->prototype()));
Handle<JSFunction> to_lower_case = Handle<JSFunction>::cast(
JSReceiver::GetProperty(
exports_container,
factory()->InternalizeUtf8String("ToLowerCaseI18N"))
.ToHandleChecked());
SetFunction(string_prototype, to_lower_case,
factory()->InternalizeUtf8String("toLowerCase"));
Handle<JSFunction> to_upper_case = Handle<JSFunction>::cast(
JSReceiver::GetProperty(
exports_container,
factory()->InternalizeUtf8String("ToUpperCaseI18N"))
.ToHandleChecked());
SetFunction(string_prototype, to_upper_case,
factory()->InternalizeUtf8String("toUpperCase"));
Handle<JSFunction> to_locale_lower_case = Handle<JSFunction>::cast(
JSReceiver::GetProperty(
exports_container,
factory()->InternalizeUtf8String("ToLocaleLowerCaseI18N"))
.ToHandleChecked());
SetFunction(string_prototype, to_locale_lower_case,
factory()->InternalizeUtf8String("toLocaleLowerCase"));
Handle<JSFunction> to_locale_upper_case = Handle<JSFunction>::cast(
JSReceiver::GetProperty(
exports_container,
factory()->InternalizeUtf8String("ToLocaleUpperCaseI18N"))
.ToHandleChecked());
SetFunction(string_prototype, to_locale_upper_case,
factory()->InternalizeUtf8String("toLocaleUpperCase"));
}
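
The four install sequences above differ only in the export name and the target property; a table-driven sketch of the same wiring (the loop is illustrative, the name pairs are the ones in the code above):

static const struct {
  const char* export_name;  // key in the exports container
  const char* js_name;      // property installed on String.prototype
} kCaseMappings[] = {
    {"ToLowerCaseI18N", "toLowerCase"},
    {"ToUpperCaseI18N", "toUpperCase"},
    {"ToLocaleLowerCaseI18N", "toLocaleLowerCase"},
    {"ToLocaleUpperCaseI18N", "toLocaleUpperCase"},
};
for (const auto& mapping : kCaseMappings) {
  Handle<JSFunction> fun = Handle<JSFunction>::cast(
      JSReceiver::GetProperty(
          exports_container,
          factory()->InternalizeUtf8String(mapping.export_name))
          .ToHandleChecked());
  SetFunction(string_prototype, fun,
              factory()->InternalizeUtf8String(mapping.js_name));
}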
#endif
Handle<JSFunction> Genesis::InstallArrayBuffer(Handle<JSObject> target,
const char* name,
Builtins::Name call,
@ -3748,10 +3990,11 @@ bool Genesis::InstallNatives(GlobalContextType context_type) {
// Store the map for the %StringPrototype% after the natives has been compiled
// and the String function has been set up.
Handle<JSFunction> string_function(native_context()->string_function());
DCHECK(JSObject::cast(
string_function->initial_map()->prototype())->HasFastProperties());
JSObject* string_function_prototype =
JSObject::cast(string_function->initial_map()->prototype());
DCHECK(string_function_prototype->HasFastProperties());
native_context()->set_string_function_prototype_map(
HeapObject::cast(string_function->initial_map()->prototype())->map());
string_function_prototype->map());
Handle<JSGlobalObject> global_object =
handle(native_context()->global_object());
@ -4023,8 +4266,6 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_tailcalls_natives[] = {nullptr};
static const char* harmony_sharedarraybuffer_natives[] = {
"native harmony-atomics.js", NULL};
static const char* harmony_simd_natives[] = {"native harmony-simd.js",
nullptr};
static const char* harmony_do_expressions_natives[] = {nullptr};
static const char* harmony_regexp_lookbehind_natives[] = {nullptr};
static const char* harmony_regexp_named_captures_natives[] = {nullptr};
@ -4032,15 +4273,18 @@ bool Genesis::InstallExperimentalNatives() {
static const char* harmony_function_sent_natives[] = {nullptr};
static const char* harmony_array_prototype_values_natives[] = {nullptr};
#ifdef V8_I18N_SUPPORT
static const char* icu_case_mapping_natives[] = {"native icu-case-mapping.js",
nullptr};
static const char* datetime_format_to_parts_natives[] = {
"native datetime-format-to-parts.js", nullptr};
static const char* icu_case_mapping_natives[] = {nullptr};
static const char* datetime_format_to_parts_natives[] = {nullptr};
#endif
static const char* harmony_restrictive_generators_natives[] = {nullptr};
static const char* harmony_trailing_commas_natives[] = {nullptr};
static const char* harmony_function_tostring_natives[] = {nullptr};
static const char* harmony_class_fields_natives[] = {nullptr};
static const char* harmony_object_spread_natives[] = {nullptr};
static const char* harmony_object_rest_spread_natives[] = {nullptr};
static const char* harmony_async_iteration_natives[] = {nullptr};
static const char* harmony_dynamic_import_natives[] = {nullptr};
static const char* harmony_promise_finally_natives[] = {nullptr};
static const char* harmony_template_escapes_natives[] = {nullptr};
for (int i = ExperimentalNatives::GetDebuggerCount();
i < ExperimentalNatives::GetBuiltinsCount(); i++) {
@ -4158,7 +4402,6 @@ void Genesis::InstallExperimentalBuiltinFunctionIds() {
}
}
#undef INSTALL_BUILTIN_ID
@ -4196,25 +4439,7 @@ bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
WasmJs::Install(isolate);
}
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
// If loading fails we just bail out without installing the
// debugger but without tanking the whole context.
Debug* debug = isolate->debug();
if (!debug->Load()) return true;
Handle<Context> debug_context = debug->debug_context();
// Set the security token for the debugger context to the same as
// the shell native context to allow calling between these (otherwise
// exposing debug global object doesn't make much sense).
debug_context->set_security_token(native_context->security_token());
Handle<String> debug_string =
factory->InternalizeUtf8String(FLAG_expose_debug_as);
uint32_t index;
if (debug_string->AsArrayIndex(&index)) return true;
Handle<Object> global_proxy(debug_context->global_proxy(), isolate);
JSObject::AddProperty(handle(native_context->global_proxy()), debug_string,
global_proxy, DONT_ENUM);
}
InstallFFIMap(isolate);
return true;
}
@ -4433,6 +4658,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
} else {
DCHECK_EQ(kDescriptor, details.location());
if (details.kind() == kData) {
DCHECK(!FLAG_track_constant_fields);
HandleScope inner(isolate());
Handle<Name> key = Handle<Name>(descs->GetKey(i));
Handle<Object> value(descs->GetValue(i), isolate());
@ -4663,6 +4889,7 @@ Genesis::Genesis(
Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
CreateStrictModeFunctionMaps(empty_function);
CreateIteratorMaps(empty_function);
CreateAsyncIteratorMaps();
CreateAsyncFunctionMaps(empty_function);
Handle<JSGlobalObject> global_object =
CreateNewGlobals(global_proxy_template, global_proxy);
@ -4690,6 +4917,15 @@ Genesis::Genesis(
if (FLAG_experimental_extras) {
if (!InstallExperimentalExtraNatives()) return;
}
// Store String.prototype's map again in case it has been changed by
// experimental natives.
Handle<JSFunction> string_function(native_context()->string_function());
JSObject* string_function_prototype =
JSObject::cast(string_function->initial_map()->prototype());
DCHECK(string_function_prototype->HasFastProperties());
native_context()->set_string_function_prototype_map(
string_function_prototype->map());
}
// The serializer cannot serialize typed arrays. Reset those typed arrays
// for each new context.
@ -4738,11 +4974,19 @@ Genesis::Genesis(Isolate* isolate,
global_proxy = factory()->NewUninitializedJSGlobalProxy(proxy_size);
}
// CreateNewGlobals.
// Create a remote object as the global object.
Handle<ObjectTemplateInfo> global_proxy_data =
v8::Utils::OpenHandle(*global_proxy_template);
Utils::OpenHandle(*global_proxy_template);
Handle<FunctionTemplateInfo> global_constructor(
FunctionTemplateInfo::cast(global_proxy_data->constructor()));
Handle<ObjectTemplateInfo> global_object_template(
ObjectTemplateInfo::cast(global_constructor->prototype_template()));
Handle<JSObject> global_object =
ApiNatives::InstantiateRemoteObject(
global_object_template).ToHandleChecked();
// (Re)initialize the global proxy object.
Handle<SharedFunctionInfo> shared =
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate,
global_constructor);
@ -4758,19 +5002,20 @@ Genesis::Genesis(Isolate* isolate,
JSFunction::SetInitialMap(global_proxy_function, global_proxy_map,
factory()->null_value());
global_proxy_map->set_is_access_check_needed(true);
global_proxy_map->set_is_callable();
global_proxy_map->set_is_constructor(true);
global_proxy_map->set_has_hidden_prototype(true);
Handle<String> global_name = factory()->global_string();
global_proxy_function->shared()->set_instance_class_name(*global_name);
factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
// GlobalProxy.
// A remote global proxy has no native context.
global_proxy->set_native_context(heap()->null_value());
// DetachGlobal.
JSObject::ForceSetPrototype(global_proxy, factory()->null_value());
// Configure the hidden prototype chain of the global proxy.
JSObject::ForceSetPrototype(global_proxy, global_object);
// TODO(dcheng): This is a hack. Why does this need to be manually called
// here? Line 4812 should have taken care of it?
global_proxy->map()->set_has_hidden_prototype(true);
global_proxy_ = global_proxy;
}

View File

@ -121,8 +121,6 @@ class Bootstrapper final {
static bool CompileExperimentalExtraBuiltin(Isolate* isolate, int index);
static void ExportFromRuntime(Isolate* isolate, Handle<JSObject> container);
static void ExportExperimentalFromRuntime(Isolate* isolate,
Handle<JSObject> container);
private:
Isolate* isolate_;

View File

@ -552,6 +552,8 @@ namespace {
void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
Label post_instantiation_deopt_entry;
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
@ -601,6 +603,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ PushRoot(Heap::kTheHoleValueRootIndex);
}
// Deoptimizer re-enters stub code here.
__ bind(&post_instantiation_deopt_entry);
// Set up pointer to last argument.
__ add(r2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
@ -633,7 +638,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
masm->pc_offset());
}
// Restore context from the frame.
@ -697,6 +703,35 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
}
__ Jump(lr);
// Store offset of trampoline address for deoptimizer. This is the bailout
// point after the receiver instantiation but before the function invocation.
// We need to restore some registers in order to continue the above code.
if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
masm->pc_offset());
// ----------- S t a t e -------------
// -- r0 : newly allocated object
// -- sp[0] : constructor function
// -----------------------------------
__ pop(r1);
__ push(r0);
__ push(r0);
// Retrieve smi-tagged arguments count from the stack.
__ ldr(r0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(r0);
// Retrieve the new target value from the stack. This was placed into the
// frame description in place of the receiver by the optimizing compiler.
__ add(r3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
__ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2));
// Continue with constructor function invocation.
__ b(&post_instantiation_deopt_entry);
}
}
} // namespace
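
The re-entry block above rebuilds the construct-stub state after a deopt at the create point: the optimizing compiler stored new.target in the receiver slot of the materialized frame, so the trampoline reloads it from the caller SP plus argc words (the add/ldr pair). A one-line restatement of that address computation, with illustrative types:

intptr_t NewTargetSlotAddress(intptr_t caller_sp, intptr_t argc) {
  // kPointerSize is V8's word size; the slot is where the receiver
  // normally lives in the caller frame.
  return caller_sp + argc * kPointerSize;
}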
@ -1002,7 +1037,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register debug_info = kInterpreterBytecodeArrayRegister;
DCHECK(!debug_info.is(r0));
__ ldr(debug_info, FieldMemOperand(r0, SharedFunctionInfo::kDebugInfoOffset));
__ cmp(debug_info, Operand(DebugInfo::uninitialized()));
__ SmiTst(debug_info);
// Load original bytecode array or the debug copy.
__ ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(r0, SharedFunctionInfo::kFunctionDataOffset), eq);
@ -1016,8 +1051,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ b(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
__ ldr(r2, FieldMemOperand(r2, LiteralsArray::kFeedbackVectorOffset));
__ ldr(r2, FieldMemOperand(r1, JSFunction::kFeedbackVectorOffset));
__ ldr(r2, FieldMemOperand(r2, Cell::kValueOffset));
__ ldr(r9, FieldMemOperand(
r2, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
@ -1148,7 +1183,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
CallableType function_type) {
InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r2 : the address of the first argument to be pushed. Subsequent
@ -1164,12 +1199,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, r3, r2, r4, r5, &stack_overflow);
// Call the target.
if (function_type == CallableType::kJSFunction) {
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@ -1185,7 +1222,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
MacroAssembler* masm, CallableType construct_type) {
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- r0 : argument count (not including receiver)
// -- r3 : new target
@ -1203,7 +1240,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
Generate_InterpreterPushArgs(masm, r0, r4, r5, r6, &stack_overflow);
__ AssertUndefinedOrAllocationSite(r2, r5);
if (construct_type == CallableType::kJSFunction) {
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(r1);
// Tail call to the function-specific construct stub (still in the caller
@ -1212,9 +1249,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ ldr(r4, FieldMemOperand(r4, SharedFunctionInfo::kConstructStubOffset));
// Jump to the construct function.
__ add(pc, r4, Operand(Code::kHeaderSize - kHeapObjectTag));
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with r0, r1, and r3 unmodified.
__ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
RelocInfo::CODE_TARGET);
} else {
DCHECK_EQ(construct_type, CallableType::kAny);
DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with r0, r1, and r3 unmodified.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@ -1336,20 +1376,26 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register argument_count = r0;
Register closure = r1;
Register new_target = r3;
Register map = argument_count;
Register index = r2;
// Do we have a valid feedback vector?
__ ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ ldr(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex,
&gotta_call_runtime_no_stack);
__ push(argument_count);
__ push(new_target);
__ push(closure);
Register map = argument_count;
Register index = r2;
__ ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ ldr(map,
FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ ldr(index, FieldMemOperand(map, FixedArray::kLengthOffset));
__ cmp(index, Operand(Smi::FromInt(2)));
__ b(lt, &gotta_call_runtime);
__ b(lt, &try_shared);
// Find literals.
// r3 : native context
// r2 : length / index
// r0 : optimized code map
@ -1369,20 +1415,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ cmp(temp, native_context);
__ b(ne, &loop_bottom);
// Literals available?
__ ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
__ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ JumpIfSmi(temp, &gotta_call_runtime);
// Save the literals in the closure.
__ ldr(r4, MemOperand(sp, 0));
__ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
__ push(index);
__ RecordWriteField(r4, JSFunction::kLiteralsOffset, temp, index,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(index);
// Code available?
Register entry = r4;
@ -1392,7 +1424,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
// Found code. Get it into the closure and return.
__ pop(closure);
// Store code entry in the closure.
__ add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
@ -1427,9 +1459,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ cmp(index, Operand(Smi::FromInt(1)));
__ b(gt, &loop_top);
// We found neither literals nor code.
__ jmp(&gotta_call_runtime);
// We found no code.
__ bind(&try_shared);
__ pop(closure);
__ pop(new_target);
@ -2063,20 +2093,20 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ bind(&target_not_constructor);
{
__ str(r1, MemOperand(sp, 0));
__ TailCallRuntime(Runtime::kThrowCalledNonCallable);
__ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ bind(&new_target_not_constructor);
{
__ str(r3, MemOperand(sp, 0));
__ TailCallRuntime(Runtime::kThrowCalledNonCallable);
__ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(r0);
__ mov(r4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ mov(r4, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ stm(db_w, sp, r0.bit() | r1.bit() | r4.bit() |
(FLAG_enable_embedded_constant_pool ? pp.bit() : 0) |
fp.bit() | lr.bit());
@ -2244,6 +2274,72 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
// static
void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- r1 : the target to call (can be any Object)
// -- r2 : start index (to support rest parameters)
// -- lr : return address.
// -- sp[0] : thisArgument
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(ip, MemOperand(r3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(ip, Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(eq, &arguments_adaptor);
{
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(
r0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r3, fp);
}
__ b(&arguments_done);
__ bind(&arguments_adaptor);
{
// Load the length from the ArgumentsAdaptorFrame.
__ ldr(r0, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ bind(&arguments_done);
Label stack_empty, stack_done, stack_overflow;
__ SmiUntag(r0);
__ sub(r0, r0, r2, SetCC);
__ b(le, &stack_empty);
{
// Check for stack overflow.
Generate_StackOverflowCheck(masm, r0, r2, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
__ add(r3, r3, Operand(kPointerSize));
__ mov(r2, r0);
__ bind(&loop);
{
__ ldr(ip, MemOperand(r3, r2, LSL, kPointerSizeLog2));
__ push(ip);
__ sub(r2, r2, Operand(1), SetCC);
__ b(ne, &loop);
}
}
}
__ b(&stack_done);
__ bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&stack_empty);
{
// We just pass the receiver, which is already on the stack.
__ mov(r0, Operand(0));
}
__ bind(&stack_done);
__ Jump(code, RelocInfo::CODE_TARGET);
}
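
The count being forwarded is the caller's actual argument count minus the start index, clamped at zero; the stack_empty path then passes only the receiver. The same arithmetic in plain C++, with illustrative names:

int ForwardedArgumentCount(int caller_arg_count, int start_index) {
  // Mirrors the SmiUntag / sub-with-flags / b(le) sequence above.
  int count = caller_arg_count - start_index;
  return count > 0 ? count : 0;  // <= 0 takes the stack_empty path
}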
namespace {
// Drops top JavaScript frame and an arguments adaptor frame below it (if
@ -2294,7 +2390,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
Label no_interpreter_frame;
__ ldr(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
__ cmp(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ b(ne, &no_interpreter_frame);
__ ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
@ -2306,7 +2402,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ ldr(scratch3,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ cmp(scratch3,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ b(ne, &no_arguments_adaptor);
// Drop current frame and load arguments count from arguments adaptor frame.
@ -2614,6 +2711,161 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
Register argc = r0;
Register constructor = r1;
Register new_target = r3;
Register scratch = r2;
Register scratch2 = r6;
Register spread = r4;
Register spread_map = r5;
Register spread_len = r5;
Label runtime_call, push_args;
__ ldr(spread, MemOperand(sp, 0));
__ JumpIfSmi(spread, &runtime_call);
__ ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
// Check that the spread is an array.
__ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
__ b(ne, &runtime_call);
// Check that we have the original ArrayPrototype.
__ ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
__ ldr(scratch2, NativeContextMemOperand());
__ ldr(scratch2,
ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ cmp(scratch, scratch2);
__ b(ne, &runtime_call);
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
__ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
__ b(ne, &runtime_call);
// Check that the map of the initial array iterator hasn't changed.
__ ldr(scratch2, NativeContextMemOperand());
__ ldr(scratch,
ContextMemOperand(scratch2,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
__ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ ldr(scratch2,
ContextMemOperand(
scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ cmp(scratch, scratch2);
__ b(ne, &runtime_call);
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
__ ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(scratch);
__ cmp(scratch, Operand(FAST_HOLEY_ELEMENTS));
__ b(hi, &runtime_call);
// For non-FastHoley kinds, we can skip the protector check.
__ cmp(scratch, Operand(FAST_SMI_ELEMENTS));
__ b(eq, &no_protector_check);
__ cmp(scratch, Operand(FAST_ELEMENTS));
__ b(eq, &no_protector_check);
// Check the ArrayProtector cell.
__ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
__ ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ cmp(scratch, Operand(Smi::FromInt(Isolate::kProtectorValid)));
__ b(ne, &runtime_call);
__ bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
__ ldr(spread_len, FieldMemOperand(spread, JSArray::kLengthOffset));
__ SmiUntag(spread_len);
__ ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
__ b(&push_args);
__ bind(&runtime_call);
{
// Call the builtin for the result of the spread.
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
__ Push(constructor);
__ Push(new_target);
__ Push(argc);
__ Push(spread);
__ CallRuntime(Runtime::kSpreadIterableFixed);
__ mov(spread, r0);
__ Pop(argc);
__ Pop(new_target);
__ Pop(constructor);
__ SmiUntag(argc);
}
{
// Calculate the new nargs including the result of the spread.
__ ldr(spread_len, FieldMemOperand(spread, FixedArray::kLengthOffset));
__ SmiUntag(spread_len);
__ bind(&push_args);
// argc += spread_len - 1. Subtract 1 for the spread itself.
__ add(argc, argc, spread_len);
__ sub(argc, argc, Operand(1));
// Pop the spread argument off the stack.
__ Pop(scratch);
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be
// overflowed here which will cause scratch to become negative.
__ sub(scratch, sp, scratch);
// Check if the arguments will overflow the stack.
__ cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
__ b(gt, &done); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ bind(&done);
}
// Put the evaluated spread onto the stack as additional arguments.
{
__ mov(scratch, Operand(0));
Label done, push, loop;
__ bind(&loop);
__ cmp(scratch, spread_len);
__ b(eq, &done);
__ add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
__ ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
__ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
__ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(scratch2);
__ add(scratch, scratch, Operand(1));
__ b(&loop);
__ bind(&done);
}
}
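
The fast path above is sound only when iterating the spread cannot run user code: the value must be a JSArray with the original ArrayPrototype, an intact array-iterator protector, and a fast elements kind, and holey kinds additionally need the Array protector, since reading a hole falls through to the prototype chain. A minimal C++ sketch of the elements-kind gate (the enum values match this V8 version; the helper itself is illustrative):

enum ElementsKindSketch {      // the four fast kinds, in source order
  kFastSmiElements = 0,        // FAST_SMI_ELEMENTS
  kFastHoleySmiElements = 1,   // FAST_HOLEY_SMI_ELEMENTS
  kFastElements = 2,           // FAST_ELEMENTS
  kFastHoleyElements = 3,      // FAST_HOLEY_ELEMENTS
};

bool SpreadFastPathAllowed(int kind, bool array_protector_intact) {
  if (kind > kFastHoleyElements) return false;  // b(hi, &runtime_call)
  if (kind == kFastSmiElements || kind == kFastElements)
    return true;                                // packed kinds: no holes
  return array_protector_intact;                // holey kinds need the cell
}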
// static
void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the target to call (can be any Object)
// -----------------------------------
// CheckSpreadAndPushToStack will push r3 to save it.
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
TailCallMode::kDisallow),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@ -2728,6 +2980,19 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : the number of arguments (not including the receiver)
// -- r1 : the constructor to call (can be any Object)
// -- r3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------

View File

@ -540,6 +540,8 @@ namespace {
void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
bool create_implicit_receiver,
bool check_derived_construct) {
Label post_instantiation_deopt_entry;
// ----------- S t a t e -------------
// -- x0 : number of arguments
// -- x1 : constructor function
@ -597,6 +599,9 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ PushRoot(Heap::kTheHoleValueRootIndex);
}
// Deoptimizer re-enters stub code here.
__ Bind(&post_instantiation_deopt_entry);
// Set up pointer to last argument.
__ Add(x2, fp, StandardFrameConstants::kCallerSPOffset);
@ -635,7 +640,8 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
// Store offset of return address for deoptimizer.
if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
masm->pc_offset());
}
// Restore the context from the frame.
@ -698,6 +704,34 @@ void Generate_JSConstructStubHelper(MacroAssembler* masm, bool is_api_function,
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, x1, x2);
}
__ Ret();
// Store offset of trampoline address for deoptimizer. This is the bailout
// point after the receiver instantiation but before the function invocation.
// We need to restore some registers in order to continue the above code.
if (create_implicit_receiver && !is_api_function) {
masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
masm->pc_offset());
// ----------- S t a t e -------------
// -- x0 : newly allocated object
// -- sp[0] : constructor function
// -----------------------------------
__ Pop(x1);
__ Push(x0, x0);
// Retrieve smi-tagged arguments count from the stack.
__ Ldr(x0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
__ SmiUntag(x0);
// Retrieve the new target value from the stack. This was placed into the
// frame description in place of the receiver by the optimizing compiler.
__ Add(x3, fp, Operand(StandardFrameConstants::kCallerSPOffset));
__ Ldr(x3, MemOperand(x3, x0, LSL, kPointerSizeLog2));
// Continue with constructor function invocation.
__ B(&post_instantiation_deopt_entry);
}
}
} // namespace
@ -1007,8 +1041,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Label load_debug_bytecode_array, bytecode_array_loaded;
DCHECK(!debug_info.is(x0));
__ Ldr(debug_info, FieldMemOperand(x0, SharedFunctionInfo::kDebugInfoOffset));
__ Cmp(debug_info, Operand(DebugInfo::uninitialized()));
__ B(ne, &load_debug_bytecode_array);
__ JumpIfNotSmi(debug_info, &load_debug_bytecode_array);
__ Ldr(kInterpreterBytecodeArrayRegister,
FieldMemOperand(x0, SharedFunctionInfo::kFunctionDataOffset));
__ Bind(&bytecode_array_loaded);
@ -1020,10 +1053,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ B(ne, &switch_to_different_code_kind);
// Increment invocation count for the function.
__ Ldr(x11, FieldMemOperand(x1, JSFunction::kLiteralsOffset));
__ Ldr(x11, FieldMemOperand(x11, LiteralsArray::kFeedbackVectorOffset));
__ Ldr(x10, FieldMemOperand(x11, FeedbackVector::kInvocationCountIndex *
kPointerSize +
__ Ldr(x11, FieldMemOperand(x1, JSFunction::kFeedbackVectorOffset));
__ Ldr(x11, FieldMemOperand(x11, Cell::kValueOffset));
__ Ldr(x10, FieldMemOperand(
x11, FeedbackVector::kInvocationCountIndex * kPointerSize +
FeedbackVector::kHeaderSize));
__ Add(x10, x10, Operand(Smi::FromInt(1)));
__ Str(x10, FieldMemOperand(
@ -1163,7 +1196,7 @@ static void Generate_InterpreterPushArgs(MacroAssembler* masm,
// static
void Builtins::Generate_InterpreterPushArgsAndCallImpl(
MacroAssembler* masm, TailCallMode tail_call_mode,
CallableType function_type) {
InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x2 : the address of the first argument to be pushed. Subsequent
@ -1180,12 +1213,14 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
Generate_InterpreterPushArgs(masm, x3, x2, x4, x5, x6, &stack_overflow);
// Call the target.
if (function_type == CallableType::kJSFunction) {
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ Jump(masm->isolate()->builtins()->CallFunction(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
__ Jump(masm->isolate()->builtins()->CallWithSpread(),
RelocInfo::CODE_TARGET);
} else {
DCHECK_EQ(function_type, CallableType::kAny);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
tail_call_mode),
RelocInfo::CODE_TARGET);
@ -1200,7 +1235,7 @@ void Builtins::Generate_InterpreterPushArgsAndCallImpl(
// static
void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
MacroAssembler* masm, CallableType construct_type) {
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
// -- x0 : argument count (not including receiver)
// -- x3 : new target
@ -1217,7 +1252,7 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
Generate_InterpreterPushArgs(masm, x0, x4, x5, x6, x7, &stack_overflow);
__ AssertUndefinedOrAllocationSite(x2, x6);
if (construct_type == CallableType::kJSFunction) {
if (mode == InterpreterPushArgsMode::kJSFunction) {
__ AssertFunction(x1);
// Tail call to the function-specific construct stub (still in the caller
@ -1226,8 +1261,12 @@ void Builtins::Generate_InterpreterPushArgsAndConstructImpl(
__ Ldr(x4, FieldMemOperand(x4, SharedFunctionInfo::kConstructStubOffset));
__ Add(x4, x4, Code::kHeaderSize - kHeapObjectTag);
__ Br(x4);
} else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// Call the constructor with x0, x1, and x3 unmodified.
__ Jump(masm->isolate()->builtins()->ConstructWithSpread(),
RelocInfo::CODE_TARGET);
} else {
DCHECK_EQ(construct_type, CallableType::kAny);
DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
// Call the constructor with x0, x1, and x3 unmodified.
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
@ -1346,14 +1385,19 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
Register closure = x1;
Register map = x13;
Register index = x2;
// Do we have a valid feedback vector?
__ Ldr(index, FieldMemOperand(closure, JSFunction::kFeedbackVectorOffset));
__ Ldr(index, FieldMemOperand(index, Cell::kValueOffset));
__ JumpIfRoot(index, Heap::kUndefinedValueRootIndex, &gotta_call_runtime);
__ Ldr(map, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
__ Ldr(map,
FieldMemOperand(map, SharedFunctionInfo::kOptimizedCodeMapOffset));
__ Ldrsw(index, UntagSmiFieldMemOperand(map, FixedArray::kLengthOffset));
__ Cmp(index, Operand(2));
__ B(lt, &gotta_call_runtime);
__ B(lt, &try_shared);
// Find literals.
// x3 : native context
// x2 : length / index
// x13 : optimized code map
@ -1373,17 +1417,6 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ Cmp(temp, native_context);
__ B(ne, &loop_bottom);
// Literals available?
__ Ldr(temp, FieldMemOperand(array_pointer,
SharedFunctionInfo::kOffsetToPreviousLiterals));
__ Ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
__ JumpIfSmi(temp, &gotta_call_runtime);
// Save the literals in the closure.
__ Str(temp, FieldMemOperand(closure, JSFunction::kLiteralsOffset));
__ RecordWriteField(closure, JSFunction::kLiteralsOffset, temp, x7,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Code available?
Register entry = x7;
@ -1393,7 +1426,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Ldr(entry, FieldMemOperand(entry, WeakCell::kValueOffset));
__ JumpIfSmi(entry, &try_shared);
// Found literals and code. Get them into the closure and return.
// Found code. Get it into the closure and return.
__ Add(entry, entry, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Str(entry, FieldMemOperand(closure, JSFunction::kCodeEntryOffset));
__ RecordWriteCodeEntryField(closure, entry, x5);
@ -1422,9 +1455,7 @@ void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
__ Cmp(index, Operand(1));
__ B(gt, &loop_top);
// We found neither literals nor code.
__ B(&gotta_call_runtime);
// We found no code.
__ Bind(&try_shared);
__ Ldr(entry,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@ -2117,20 +2148,20 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Bind(&target_not_constructor);
{
__ Poke(target, 0);
__ TailCallRuntime(Runtime::kThrowCalledNonCallable);
__ TailCallRuntime(Runtime::kThrowNotConstructor);
}
// 4c. The new.target is not a constructor, throw an appropriate TypeError.
__ Bind(&new_target_not_constructor);
{
__ Poke(new_target, 0);
__ TailCallRuntime(Runtime::kThrowCalledNonCallable);
__ TailCallRuntime(Runtime::kThrowNotConstructor);
}
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
__ SmiTag(x10, x0);
__ Mov(x11, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ Mov(x11, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ Push(lr, fp);
__ Push(x11, x1, x10);
__ Add(fp, jssp,
@ -2328,6 +2359,72 @@ void Builtins::Generate_Apply(MacroAssembler* masm) {
}
}
// static
void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- x1 : the target to call (can be any Object)
// -- x2 : start index (to support rest parameters)
// -- lr : return address.
// -- sp[0] : thisArgument
// -----------------------------------
// Check if we have an arguments adaptor frame below the function frame.
Label arguments_adaptor, arguments_done;
__ Ldr(x3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(x4, MemOperand(x3, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(x4, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
__ B(eq, &arguments_adaptor);
{
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ Ldr(x0, FieldMemOperand(x0, JSFunction::kSharedFunctionInfoOffset));
__ Ldrsw(x0, FieldMemOperand(
x0, SharedFunctionInfo::kFormalParameterCountOffset));
__ Mov(x3, fp);
}
__ B(&arguments_done);
__ Bind(&arguments_adaptor);
{
// Just load the length from ArgumentsAdaptorFrame.
__ Ldrsw(x0, UntagSmiMemOperand(
x3, ArgumentsAdaptorFrameConstants::kLengthOffset));
}
__ Bind(&arguments_done);
Label stack_empty, stack_done, stack_overflow;
__ Subs(x0, x0, x2);
__ B(le, &stack_empty);
{
// Check for stack overflow.
Generate_StackOverflowCheck(masm, x0, x2, &stack_overflow);
// Forward the arguments from the caller frame.
{
Label loop;
__ Add(x3, x3, kPointerSize);
__ Mov(x2, x0);
__ bind(&loop);
{
__ Ldr(x4, MemOperand(x3, x2, LSL, kPointerSizeLog2));
__ Push(x4);
__ Subs(x2, x2, 1);
__ B(ne, &loop);
}
}
}
__ B(&stack_done);
__ Bind(&stack_overflow);
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&stack_empty);
{
// We just pass the receiver, which is already on the stack.
__ Mov(x0, 0);
}
__ Bind(&stack_done);
__ Jump(code, RelocInfo::CODE_TARGET);
}
namespace {
// Drops top JavaScript frame and an arguments adaptor frame below it (if
@ -2378,7 +2475,7 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
Label no_interpreter_frame;
__ Ldr(scratch3,
MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::STUB)));
__ Cmp(scratch3, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
__ B(ne, &no_interpreter_frame);
__ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ bind(&no_interpreter_frame);
@ -2390,7 +2487,8 @@ void PrepareForTailCall(MacroAssembler* masm, Register args_reg,
__ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
__ Ldr(scratch3,
MemOperand(scratch2, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Cmp(scratch3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
__ Cmp(scratch3,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ B(ne, &no_arguments_adaptor);
// Drop current frame and load arguments count from arguments adaptor frame.
@ -2693,6 +2791,155 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode,
}
}
static void CheckSpreadAndPushToStack(MacroAssembler* masm) {
Register argc = x0;
Register constructor = x1;
Register new_target = x3;
Register scratch = x2;
Register scratch2 = x6;
Register spread = x4;
Register spread_map = x5;
Register spread_len = x5;
Label runtime_call, push_args;
__ Peek(spread, 0);
__ JumpIfSmi(spread, &runtime_call);
__ Ldr(spread_map, FieldMemOperand(spread, HeapObject::kMapOffset));
// Check that the spread is an array.
__ CompareInstanceType(spread_map, scratch, JS_ARRAY_TYPE);
__ B(ne, &runtime_call);
// Check that we have the original ArrayPrototype.
__ Ldr(scratch, FieldMemOperand(spread_map, Map::kPrototypeOffset));
__ Ldr(scratch2, NativeContextMemOperand());
__ Ldr(scratch2,
ContextMemOperand(scratch2, Context::INITIAL_ARRAY_PROTOTYPE_INDEX));
__ Cmp(scratch, scratch2);
__ B(ne, &runtime_call);
// Check that the ArrayPrototype hasn't been modified in a way that would
// affect iteration.
__ LoadRoot(scratch, Heap::kArrayIteratorProtectorRootIndex);
__ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
__ B(ne, &runtime_call);
// Check that the map of the initial array iterator hasn't changed.
__ Ldr(scratch2, NativeContextMemOperand());
__ Ldr(scratch,
ContextMemOperand(scratch2,
Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_INDEX));
__ Ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
__ Ldr(scratch2,
ContextMemOperand(
scratch2, Context::INITIAL_ARRAY_ITERATOR_PROTOTYPE_MAP_INDEX));
__ Cmp(scratch, scratch2);
__ B(ne, &runtime_call);
// For FastPacked kinds, iteration will have the same effect as simply
// accessing each property in order.
Label no_protector_check;
__ Ldr(scratch, FieldMemOperand(spread_map, Map::kBitField2Offset));
__ DecodeField<Map::ElementsKindBits>(scratch);
__ Cmp(scratch, FAST_HOLEY_ELEMENTS);
__ B(hi, &runtime_call);
// For non-FastHoley kinds, we can skip the protector check.
__ Cmp(scratch, FAST_SMI_ELEMENTS);
__ B(eq, &no_protector_check);
__ Cmp(scratch, FAST_ELEMENTS);
__ B(eq, &no_protector_check);
// Check the ArrayProtector cell.
__ LoadRoot(scratch, Heap::kArrayProtectorRootIndex);
__ Ldr(scratch, FieldMemOperand(scratch, PropertyCell::kValueOffset));
__ Cmp(scratch, Smi::FromInt(Isolate::kProtectorValid));
__ B(ne, &runtime_call);
__ Bind(&no_protector_check);
// Load the FixedArray backing store, but use the length from the array.
__ Ldrsw(spread_len, UntagSmiFieldMemOperand(spread, JSArray::kLengthOffset));
__ Ldr(spread, FieldMemOperand(spread, JSArray::kElementsOffset));
__ B(&push_args);
__ Bind(&runtime_call);
{
// Call the builtin for the result of the spread.
FrameScope scope(masm, StackFrame::INTERNAL);
__ SmiTag(argc);
__ Push(constructor, new_target, argc, spread);
__ CallRuntime(Runtime::kSpreadIterableFixed);
__ Mov(spread, x0);
__ Pop(argc, new_target, constructor);
__ SmiUntag(argc);
}
{
// Calculate the new nargs including the result of the spread.
__ Ldrsw(spread_len,
UntagSmiFieldMemOperand(spread, FixedArray::kLengthOffset));
__ Bind(&push_args);
// argc += spread_len - 1. Subtract 1 for the spread itself.
__ Add(argc, argc, spread_len);
__ Sub(argc, argc, 1);
// Pop the spread argument off the stack.
__ Pop(scratch);
}
// Check for stack overflow.
{
// Check the stack for overflow. We are not trying to catch interruptions
// (i.e. debug break and preemption) here, so check the "real stack limit".
Label done;
__ LoadRoot(scratch, Heap::kRealStackLimitRootIndex);
// Make scratch the space we have left. The stack might already be
// overflowed here which will cause scratch to become negative.
__ Sub(scratch, masm->StackPointer(), scratch);
// Check if the arguments will overflow the stack.
__ Cmp(scratch, Operand(spread_len, LSL, kPointerSizeLog2));
__ B(gt, &done); // Signed comparison.
__ TailCallRuntime(Runtime::kThrowStackOverflow);
__ Bind(&done);
}
// Put the evaluated spread onto the stack as additional arguments.
{
__ Mov(scratch, 0);
Label done, push, loop;
__ Bind(&loop);
__ Cmp(scratch, spread_len);
__ B(eq, &done);
__ Add(scratch2, spread, Operand(scratch, LSL, kPointerSizeLog2));
__ Ldr(scratch2, FieldMemOperand(scratch2, FixedArray::kHeaderSize));
__ JumpIfNotRoot(scratch2, Heap::kTheHoleValueRootIndex, &push);
__ LoadRoot(scratch2, Heap::kUndefinedValueRootIndex);
__ bind(&push);
__ Push(scratch2);
__ Add(scratch, scratch, Operand(1));
__ B(&loop);
__ Bind(&done);
}
}
// static
void Builtins::Generate_CallWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the target to call (can be any Object)
// -----------------------------------
// CheckSpreadAndPushToStack will push x3 to save it.
__ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny,
TailCallMode::kDisallow),
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
@ -2813,6 +3060,19 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_ConstructWithSpread(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments (not including the receiver)
// -- x1 : the constructor to call (can be any Object)
// -- x3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -----------------------------------
CheckSpreadAndPushToStack(masm);
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");

View File

@ -7,6 +7,10 @@
#include "src/api-arguments.h"
#include "src/api-natives.h"
#include "src/builtins/builtins-utils.h"
#include "src/counters.h"
#include "src/log.h"
#include "src/objects-inl.h"
#include "src/prototype.h"
namespace v8 {
namespace internal {

View File

@ -0,0 +1,425 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins-arguments.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/interface-descriptors.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
typedef compiler::Node Node;
std::tuple<Node*, Node*, Node*>
ArgumentsBuiltinsAssembler::GetArgumentsFrameAndCount(Node* function,
ParameterMode mode) {
CSA_ASSERT(this, HasInstanceType(function, JS_FUNCTION_TYPE));
Variable frame_ptr(this, MachineType::PointerRepresentation());
frame_ptr.Bind(LoadParentFramePointer());
CSA_ASSERT(this,
WordEqual(function,
LoadBufferObject(frame_ptr.value(),
StandardFrameConstants::kFunctionOffset,
MachineType::Pointer())));
Variable argument_count(this, ParameterRepresentation(mode));
VariableList list({&frame_ptr, &argument_count}, zone());
Label done_argument_count(this, list);
// Determine the number of passed parameters, which is either the count stored
// in an arguments adapter frame or fetched from the shared function info.
Node* frame_ptr_above = LoadBufferObject(
frame_ptr.value(), StandardFrameConstants::kCallerFPOffset,
MachineType::Pointer());
Node* shared =
LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset);
Node* formal_parameter_count = LoadSharedFunctionInfoSpecialField(
shared, SharedFunctionInfo::kFormalParameterCountOffset, mode);
argument_count.Bind(formal_parameter_count);
Node* marker_or_function = LoadBufferObject(
frame_ptr_above, CommonFrameConstants::kContextOrFrameTypeOffset);
GotoIf(
MarkerIsNotFrameType(marker_or_function, StackFrame::ARGUMENTS_ADAPTOR),
&done_argument_count);
Node* adapted_parameter_count = LoadBufferObject(
frame_ptr_above, ArgumentsAdaptorFrameConstants::kLengthOffset);
frame_ptr.Bind(frame_ptr_above);
argument_count.Bind(TaggedToParameter(adapted_parameter_count, mode));
Goto(&done_argument_count);
Bind(&done_argument_count);
return std::tuple<Node*, Node*, Node*>(
frame_ptr.value(), argument_count.value(), formal_parameter_count);
}
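
Stripped of the CSA plumbing, the function answers one question: which frame holds the arguments, and how many are there. If the frame above the function's own frame is an arguments adaptor, the adaptor's length field gives the actual count; otherwise the formal parameter count from the SharedFunctionInfo applies. A hedged plain-C++ restatement (illustrative names only; the real code works on CSA Nodes):

struct ArgumentsFrameInfo {
  void* frame_ptr;     // frame the arguments live in
  int argument_count;  // how many were actually passed
};

ArgumentsFrameInfo ResolveArgumentsFrame(void* fp, void* caller_fp,
                                         bool caller_is_adaptor_frame,
                                         int adaptor_length,
                                         int formal_parameter_count) {
  if (caller_is_adaptor_frame) return {caller_fp, adaptor_length};
  return {fp, formal_parameter_count};
}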
std::tuple<Node*, Node*, Node*>
ArgumentsBuiltinsAssembler::AllocateArgumentsObject(Node* map,
Node* arguments_count,
Node* parameter_map_count,
ParameterMode mode,
int base_size) {
// Allocate the parameter object (either a Rest parameter object, a strict
// argument object or a sloppy arguments object) and the elements/mapped
// arguments together.
int elements_offset = base_size;
Node* element_count = arguments_count;
if (parameter_map_count != nullptr) {
base_size += FixedArray::kHeaderSize;
element_count = IntPtrOrSmiAdd(element_count, parameter_map_count, mode);
}
bool empty = IsIntPtrOrSmiConstantZero(arguments_count);
DCHECK_IMPLIES(empty, parameter_map_count == nullptr);
Node* size =
empty ? IntPtrConstant(base_size)
: ElementOffsetFromIndex(element_count, FAST_ELEMENTS, mode,
base_size + FixedArray::kHeaderSize);
Node* result = Allocate(size);
Comment("Initialize arguments object");
StoreMapNoWriteBarrier(result, map);
Node* empty_fixed_array = LoadRoot(Heap::kEmptyFixedArrayRootIndex);
StoreObjectField(result, JSArray::kPropertiesOffset, empty_fixed_array);
Node* smi_arguments_count = ParameterToTagged(arguments_count, mode);
StoreObjectFieldNoWriteBarrier(result, JSArray::kLengthOffset,
smi_arguments_count);
Node* arguments = nullptr;
if (!empty) {
arguments = InnerAllocate(result, elements_offset);
StoreObjectFieldNoWriteBarrier(arguments, FixedArray::kLengthOffset,
smi_arguments_count);
Node* fixed_array_map = LoadRoot(Heap::kFixedArrayMapRootIndex);
StoreMapNoWriteBarrier(arguments, fixed_array_map);
}
Node* parameter_map = nullptr;
if (parameter_map_count != nullptr) {
Node* parameter_map_offset = ElementOffsetFromIndex(
arguments_count, FAST_ELEMENTS, mode, FixedArray::kHeaderSize);
parameter_map = InnerAllocate(arguments, parameter_map_offset);
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
parameter_map);
Node* sloppy_elements_map =
LoadRoot(Heap::kSloppyArgumentsElementsMapRootIndex);
StoreMapNoWriteBarrier(parameter_map, sloppy_elements_map);
parameter_map_count = ParameterToTagged(parameter_map_count, mode);
StoreObjectFieldNoWriteBarrier(parameter_map, FixedArray::kLengthOffset,
parameter_map_count);
} else {
if (empty) {
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
empty_fixed_array);
} else {
StoreObjectFieldNoWriteBarrier(result, JSArray::kElementsOffset,
arguments);
}
}
return std::tuple<Node*, Node*, Node*>(result, arguments, parameter_map);
}
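// Sketch of the resulting allocation (illustrative): the object, its
// elements FixedArray and (for sloppy arguments) the parameter map are
// carved out of one contiguous new-space block:
//   [arguments object | elements FixedArray | parameter map FixedArray]
// with the object's elements field pointing at the parameter map in the
// mapped case and at the plain elements array otherwise.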
Node* ArgumentsBuiltinsAssembler::ConstructParametersObjectFromArgs(
Node* map, Node* frame_ptr, Node* arg_count, Node* first_arg,
Node* rest_count, ParameterMode param_mode, int base_size) {
// Allocate the parameter object (either a Rest parameter object, a strict
// argument object or a sloppy arguments object) and the elements together and
// fill in the contents with the arguments above |formal_parameter_count|.
Node* result;
Node* elements;
Node* unused;
std::tie(result, elements, unused) =
AllocateArgumentsObject(map, rest_count, nullptr, param_mode, base_size);
DCHECK(unused == nullptr);
CodeStubArguments arguments(this, arg_count, frame_ptr, param_mode);
Variable offset(this, MachineType::PointerRepresentation());
offset.Bind(IntPtrConstant(FixedArrayBase::kHeaderSize - kHeapObjectTag));
VariableList list({&offset}, zone());
arguments.ForEach(list,
[this, elements, &offset](Node* arg) {
StoreNoWriteBarrier(MachineRepresentation::kTagged,
elements, offset.value(), arg);
Increment(offset, kPointerSize);
},
first_arg, nullptr, param_mode);
return result;
}
Node* ArgumentsBuiltinsAssembler::EmitFastNewRestParameter(Node* context,
Node* function) {
Node* frame_ptr;
Node* argument_count;
Node* formal_parameter_count;
ParameterMode mode = OptimalParameterMode();
Node* zero = IntPtrOrSmiConstant(0, mode);
std::tie(frame_ptr, argument_count, formal_parameter_count) =
GetArgumentsFrameAndCount(function, mode);
Variable result(this, MachineRepresentation::kTagged);
Label no_rest_parameters(this), runtime(this, Label::kDeferred),
done(this, &result);
Node* rest_count =
IntPtrOrSmiSub(argument_count, formal_parameter_count, mode);
Node* const native_context = LoadNativeContext(context);
Node* const array_map = LoadJSArrayElementsMap(FAST_ELEMENTS, native_context);
GotoIf(IntPtrOrSmiLessThanOrEqual(rest_count, zero, mode),
&no_rest_parameters);
GotoIfFixedArraySizeDoesntFitInNewSpace(
rest_count, &runtime, JSArray::kSize + FixedArray::kHeaderSize, mode);
// Allocate the Rest JSArray and the elements together and fill in the
// contents with the arguments above |formal_parameter_count|.
result.Bind(ConstructParametersObjectFromArgs(
array_map, frame_ptr, argument_count, formal_parameter_count, rest_count,
mode, JSArray::kSize));
Goto(&done);
Bind(&no_rest_parameters);
{
Node* arguments;
Node* elements;
Node* unused;
std::tie(arguments, elements, unused) =
AllocateArgumentsObject(array_map, zero, nullptr, mode, JSArray::kSize);
result.Bind(arguments);
Goto(&done);
}
Bind(&runtime);
{
result.Bind(CallRuntime(Runtime::kNewRestParameter, context, function));
Goto(&done);
}
Bind(&done);
return result.value();
}
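// Illustrative example (assumption): for
//   function f(a, ...rest) { return rest; }
//   f(1, 2, 3);  // rest_count = 3 - 1 = 2, so rest is [2, 3]
//   f(1);        // rest_count <= 0, so the empty-array path is taken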
TF_BUILTIN(FastNewRestParameter, ArgumentsBuiltinsAssembler) {
Node* function = Parameter(FastNewArgumentsDescriptor::kFunction);
Node* context = Parameter(FastNewArgumentsDescriptor::kContext);
Return(EmitFastNewRestParameter(context, function));
}
Node* ArgumentsBuiltinsAssembler::EmitFastNewStrictArguments(Node* context,
Node* function) {
Variable result(this, MachineRepresentation::kTagged);
Label done(this, &result), empty(this), runtime(this, Label::kDeferred);
Node* frame_ptr;
Node* argument_count;
Node* formal_parameter_count;
ParameterMode mode = OptimalParameterMode();
Node* zero = IntPtrOrSmiConstant(0, mode);
std::tie(frame_ptr, argument_count, formal_parameter_count) =
GetArgumentsFrameAndCount(function, mode);
GotoIfFixedArraySizeDoesntFitInNewSpace(
argument_count, &runtime,
JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
Node* const native_context = LoadNativeContext(context);
Node* const map =
LoadContextElement(native_context, Context::STRICT_ARGUMENTS_MAP_INDEX);
GotoIf(WordEqual(argument_count, zero), &empty);
result.Bind(ConstructParametersObjectFromArgs(
map, frame_ptr, argument_count, zero, argument_count, mode,
JSStrictArgumentsObject::kSize));
Goto(&done);
Bind(&empty);
{
Node* arguments;
Node* elements;
Node* unused;
std::tie(arguments, elements, unused) = AllocateArgumentsObject(
map, zero, nullptr, mode, JSStrictArgumentsObject::kSize);
result.Bind(arguments);
Goto(&done);
}
Bind(&runtime);
{
result.Bind(CallRuntime(Runtime::kNewStrictArguments, context, function));
Goto(&done);
}
Bind(&done);
return result.value();
}
TF_BUILTIN(FastNewStrictArguments, ArgumentsBuiltinsAssembler) {
Node* function = Parameter(FastNewArgumentsDescriptor::kFunction);
Node* context = Parameter(FastNewArgumentsDescriptor::kContext);
Return(EmitFastNewStrictArguments(context, function));
}
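// Illustrative example (assumption): strict arguments are never mapped, so
//   'use strict';
//   function f(a) { a = 42; return arguments[0]; }
//   f(1);  // returns 1; the write to `a` does not alias arguments[0]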
Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
Node* function) {
Node* frame_ptr;
Node* argument_count;
Node* formal_parameter_count;
Variable result(this, MachineRepresentation::kTagged);
ParameterMode mode = OptimalParameterMode();
Node* zero = IntPtrOrSmiConstant(0, mode);
Label done(this, &result), empty(this), no_parameters(this),
runtime(this, Label::kDeferred);
std::tie(frame_ptr, argument_count, formal_parameter_count) =
GetArgumentsFrameAndCount(function, mode);
GotoIf(WordEqual(argument_count, zero), &empty);
GotoIf(WordEqual(formal_parameter_count, zero), &no_parameters);
{
Comment("Mapped parameter JSSloppyArgumentsObject");
Node* mapped_count =
IntPtrOrSmiMin(argument_count, formal_parameter_count, mode);
Node* parameter_map_size =
IntPtrOrSmiAdd(mapped_count, IntPtrOrSmiConstant(2, mode), mode);
// Verify that the overall allocation will fit in new space.
Node* elements_allocated =
IntPtrOrSmiAdd(argument_count, parameter_map_size, mode);
GotoIfFixedArraySizeDoesntFitInNewSpace(
elements_allocated, &runtime,
JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize * 2, mode);
Node* const native_context = LoadNativeContext(context);
Node* const map = LoadContextElement(
native_context, Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX);
Node* argument_object;
Node* elements;
Node* map_array;
std::tie(argument_object, elements, map_array) =
AllocateArgumentsObject(map, argument_count, parameter_map_size, mode,
JSSloppyArgumentsObject::kSize);
StoreObjectFieldNoWriteBarrier(
argument_object, JSSloppyArgumentsObject::kCalleeOffset, function);
StoreFixedArrayElement(map_array, 0, context, SKIP_WRITE_BARRIER);
StoreFixedArrayElement(map_array, 1, elements, SKIP_WRITE_BARRIER);
Comment("Fill in non-mapped parameters");
Node* argument_offset =
ElementOffsetFromIndex(argument_count, FAST_ELEMENTS, mode,
FixedArray::kHeaderSize - kHeapObjectTag);
Node* mapped_offset =
ElementOffsetFromIndex(mapped_count, FAST_ELEMENTS, mode,
FixedArray::kHeaderSize - kHeapObjectTag);
CodeStubArguments arguments(this, argument_count, frame_ptr, mode);
Variable current_argument(this, MachineType::PointerRepresentation());
current_argument.Bind(arguments.AtIndexPtr(argument_count, mode));
VariableList var_list1({&current_argument}, zone());
mapped_offset = BuildFastLoop(
var_list1, argument_offset, mapped_offset,
[this, elements, &current_argument](Node* offset) {
Increment(current_argument, kPointerSize);
Node* arg = LoadBufferObject(current_argument.value(), 0);
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
arg);
},
-kPointerSize, INTPTR_PARAMETERS);
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_count slots. They index the context,
// where parameters are stored in reverse order, at
// MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+argument_count-1
// The mapped parameters thus need to get indices
// MIN_CONTEXT_SLOTS+parameter_count-1 ..
// MIN_CONTEXT_SLOTS+argument_count-mapped_count
// We loop from right to left.
Comment("Fill in mapped parameters");
Variable context_index(this, OptimalParameterRepresentation());
context_index.Bind(IntPtrOrSmiSub(
IntPtrOrSmiAdd(IntPtrOrSmiConstant(Context::MIN_CONTEXT_SLOTS, mode),
formal_parameter_count, mode),
mapped_count, mode));
Node* the_hole = TheHoleConstant();
VariableList var_list2({&context_index}, zone());
const int kParameterMapHeaderSize =
FixedArray::kHeaderSize + 2 * kPointerSize;
Node* adjusted_map_array = IntPtrAdd(
BitcastTaggedToWord(map_array),
IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
Node* zero_offset = ElementOffsetFromIndex(
zero, FAST_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
BuildFastLoop(var_list2, mapped_offset, zero_offset,
[this, the_hole, elements, adjusted_map_array, &context_index,
mode](Node* offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged,
elements, offset, the_hole);
StoreNoWriteBarrier(
MachineRepresentation::kTagged, adjusted_map_array,
offset, ParameterToTagged(context_index.value(), mode));
Increment(context_index, 1, mode);
},
-kPointerSize, INTPTR_PARAMETERS);
result.Bind(argument_object);
Goto(&done);
}
Bind(&no_parameters);
{
Comment("No parameters JSSloppyArgumentsObject");
GotoIfFixedArraySizeDoesntFitInNewSpace(
argument_count, &runtime,
JSSloppyArgumentsObject::kSize + FixedArray::kHeaderSize, mode);
Node* const native_context = LoadNativeContext(context);
Node* const map =
LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
result.Bind(ConstructParametersObjectFromArgs(
map, frame_ptr, argument_count, zero, argument_count, mode,
JSSloppyArgumentsObject::kSize));
StoreObjectFieldNoWriteBarrier(
result.value(), JSSloppyArgumentsObject::kCalleeOffset, function);
Goto(&done);
}
Bind(&empty);
{
Comment("Empty JSSloppyArgumentsObject");
Node* const native_context = LoadNativeContext(context);
Node* const map =
LoadContextElement(native_context, Context::SLOPPY_ARGUMENTS_MAP_INDEX);
Node* arguments;
Node* elements;
Node* unused;
std::tie(arguments, elements, unused) = AllocateArgumentsObject(
map, zero, nullptr, mode, JSSloppyArgumentsObject::kSize);
result.Bind(arguments);
StoreObjectFieldNoWriteBarrier(
result.value(), JSSloppyArgumentsObject::kCalleeOffset, function);
Goto(&done);
}
Bind(&runtime);
{
result.Bind(CallRuntime(Runtime::kNewSloppyArguments, context, function));
Goto(&done);
}
Bind(&done);
return result.value();
}
TF_BUILTIN(FastNewSloppyArguments, ArgumentsBuiltinsAssembler) {
Node* function = Parameter(FastNewArgumentsDescriptor::kFunction);
Node* context = Parameter(FastNewArgumentsDescriptor::kContext);
Return(EmitFastNewSloppyArguments(context, function));
}
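// Illustrative example (assumption): in sloppy mode the parameter map
// aliases formal parameters with their arguments slots:
//   function f(a) { a = 42; return arguments[0]; }
//   f(1);   // returns 42 via the mapped-parameters path above
//   f();    // argument_count == 0 takes the "Empty" path instead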
} // namespace internal
} // namespace v8

View File

@ -0,0 +1,55 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/code-stub-assembler.h"
namespace v8 {
namespace internal {
typedef compiler::Node Node;
typedef compiler::CodeAssemblerState CodeAssemblerState;
typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
class ArgumentsBuiltinsAssembler : public CodeStubAssembler {
public:
explicit ArgumentsBuiltinsAssembler(CodeAssemblerState* state)
: CodeStubAssembler(state) {}
Node* EmitFastNewStrictArguments(Node* context, Node* function);
Node* EmitFastNewSloppyArguments(Node* context, Node* function);
Node* EmitFastNewRestParameter(Node* context, Node* function);
private:
// Calculates and returns the frame pointer, argument count and formal
// parameter count to be used to access a function's parameters, taking
// argument adapter frames into account. The tuple is of the form:
// <frame_ptr, # parameters actually passed, formal parameter count>
std::tuple<Node*, Node*, Node*> GetArgumentsFrameAndCount(Node* function,
ParameterMode mode);
// Allocates an arguments object (either rest, strict or sloppy) together with
// FixedArray elements for the arguments and a parameter map (for sloppy
// arguments only). A tuple is returned with pointers to the arguments object,
// the elements and parameter map in the form:
// <argument object, arguments FixedArray, parameter map or nullptr>
std::tuple<Node*, Node*, Node*> AllocateArgumentsObject(
Node* map, Node* arguments, Node* mapped_arguments,
ParameterMode param_mode, int base_size);
// For Rest parameters and Strict arguments, the copying of parameters from
// the stack into the arguments object is straightforward and shares much of
// the same underlying logic, which is encapsulated by this function. It
// allocates an arguments-like object of size |base_size| with the map |map|,
// and then copies |rest_count| arguments from the stack frame pointed to by
// |frame_ptr| starting from |first_arg|. |arg_count| == |first_arg| +
// |rest_count|.
Node* ConstructParametersObjectFromArgs(Node* map, Node* frame_ptr,
Node* arg_count, Node* first_arg,
Node* rest_count,
ParameterMode param_mode,
int base_size);
};
} // namespace internal
} // namespace v8

File diff suppressed because it is too large

View File

@ -2,8 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/counters.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {

View File

@ -0,0 +1,208 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins-async.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
typedef compiler::Node Node;
typedef CodeStubAssembler::ParameterMode ParameterMode;
typedef compiler::CodeAssemblerState CodeAssemblerState;
class AsyncFunctionBuiltinsAssembler : public AsyncBuiltinsAssembler {
public:
explicit AsyncFunctionBuiltinsAssembler(CodeAssemblerState* state)
: AsyncBuiltinsAssembler(state) {}
protected:
void AsyncFunctionAwait(Node* const context, Node* const generator,
Node* const awaited, Node* const outer_promise,
const bool is_predicted_as_caught);
void AsyncFunctionAwaitResumeClosure(
Node* const context, Node* const sent_value,
JSGeneratorObject::ResumeMode resume_mode);
};
namespace {
// Describe fields of Context associated with AsyncFunctionAwait resume
// closures.
// TODO(jgruber): Refactor to reuse code for upcoming async-generators.
class AwaitContext {
public:
enum Fields { kGeneratorSlot = Context::MIN_CONTEXT_SLOTS, kLength };
};
} // anonymous namespace
void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwaitResumeClosure(
Node* context, Node* sent_value,
JSGeneratorObject::ResumeMode resume_mode) {
DCHECK(resume_mode == JSGeneratorObject::kNext ||
resume_mode == JSGeneratorObject::kThrow);
Node* const generator =
LoadContextElement(context, AwaitContext::kGeneratorSlot);
CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
// Inline version of GeneratorPrototypeNext / GeneratorPrototypeReturn with
// unnecessary runtime checks removed.
// TODO(jgruber): Refactor to reuse code from builtins-generator.cc.
// Ensure that the generator is neither closed nor running.
CSA_SLOW_ASSERT(
this,
SmiGreaterThan(
LoadObjectField(generator, JSGeneratorObject::kContinuationOffset),
SmiConstant(JSGeneratorObject::kGeneratorClosed)));
// Resume the {receiver} using our trampoline.
Callable callable = CodeFactory::ResumeGenerator(isolate());
CallStub(callable, context, sent_value, generator, SmiConstant(resume_mode));
// The resulting Promise is a throwaway, so it doesn't matter what it
// resolves to. What is important is that we don't end up keeping the
// whole chain of intermediate Promises alive by returning the return value
// of ResumeGenerator, as that would create a memory leak.
}
TF_BUILTIN(AsyncFunctionAwaitRejectClosure, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
Node* const sentError = Parameter(1);
Node* const context = Parameter(4);
AsyncFunctionAwaitResumeClosure(context, sentError,
JSGeneratorObject::kThrow);
Return(UndefinedConstant());
}
TF_BUILTIN(AsyncFunctionAwaitResolveClosure, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
Node* const sentValue = Parameter(1);
Node* const context = Parameter(4);
AsyncFunctionAwaitResumeClosure(context, sentValue, JSGeneratorObject::kNext);
Return(UndefinedConstant());
}
// ES#abstract-ops-async-function-await
// AsyncFunctionAwait ( value )
// Shared logic for the core of await. The parser desugars
// await awaited
// into
// yield AsyncFunctionAwait{Caught,Uncaught}(.generator, awaited, .promise)
// The 'awaited' parameter is the value; the generator stands in
// for the asyncContext, and .promise is the larger promise under
// construction by the enclosing async function.
void AsyncFunctionBuiltinsAssembler::AsyncFunctionAwait(
Node* const context, Node* const generator, Node* const awaited,
Node* const outer_promise, const bool is_predicted_as_caught) {
CSA_SLOW_ASSERT(this, HasInstanceType(generator, JS_GENERATOR_OBJECT_TYPE));
CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
NodeGenerator1 create_closure_context = [&](Node* native_context) -> Node* {
Node* const context =
CreatePromiseContext(native_context, AwaitContext::kLength);
StoreContextElementNoWriteBarrier(context, AwaitContext::kGeneratorSlot,
generator);
return context;
};
// TODO(jgruber): AsyncBuiltinsAssembler::Await currently does not reuse
// the awaited promise if it is already a promise. Reuse is not
// spec-compliant, but it was part of our old behavior and gives us a couple
// of percent performance boost.
// TODO(jgruber): Use a faster specialized version of
// InternalPerformPromiseThen.
Node* const result = Await(
context, generator, awaited, outer_promise, create_closure_context,
Context::ASYNC_FUNCTION_AWAIT_RESOLVE_SHARED_FUN,
Context::ASYNC_FUNCTION_AWAIT_REJECT_SHARED_FUN, is_predicted_as_caught);
Return(result);
}
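// Rough sketch of the desugaring described above (illustrative):
//   async function f() { return await g(); }
// behaves like a generator whose body yields
//   AsyncFunctionAwaitUncaught(.generator, g(), .promise)
// and which is resumed by the resolve/reject closures allocated in Await.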
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates that there is a locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitCaught, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 3);
Node* const generator = Parameter(1);
Node* const awaited = Parameter(2);
Node* const outer_promise = Parameter(3);
Node* const context = Parameter(6);
static const bool kIsPredictedAsCaught = true;
AsyncFunctionAwait(context, generator, awaited, outer_promise,
kIsPredictedAsCaught);
}
// Called by the parser from the desugaring of 'await' when catch
// prediction indicates no locally surrounding catch block.
TF_BUILTIN(AsyncFunctionAwaitUncaught, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 3);
Node* const generator = Parameter(1);
Node* const awaited = Parameter(2);
Node* const outer_promise = Parameter(3);
Node* const context = Parameter(6);
static const bool kIsPredictedAsCaught = false;
AsyncFunctionAwait(context, generator, awaited, outer_promise,
kIsPredictedAsCaught);
}
TF_BUILTIN(AsyncFunctionPromiseCreate, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 0);
Node* const context = Parameter(3);
Node* const promise = AllocateAndInitJSPromise(context);
Label if_is_debug_active(this, Label::kDeferred);
GotoIf(IsDebugActive(), &if_is_debug_active);
// Early exit if debug is not active.
Return(promise);
Bind(&if_is_debug_active);
{
// Push the Promise under construction in an async function on
// the catch prediction stack to handle exceptions thrown before
// the first await.
// Assign ID and create a recurring task to save stack for future
// resumptions from await.
CallRuntime(Runtime::kDebugAsyncFunctionPromiseCreated, context, promise);
Return(promise);
}
}
TF_BUILTIN(AsyncFunctionPromiseRelease, AsyncFunctionBuiltinsAssembler) {
CSA_ASSERT_JS_ARGC_EQ(this, 1);
Node* const promise = Parameter(1);
Node* const context = Parameter(4);
Label if_is_debug_active(this, Label::kDeferred);
GotoIf(IsDebugActive(), &if_is_debug_active);
// Early exit if debug is not active.
Return(UndefinedConstant());
Bind(&if_is_debug_active);
{
// Pop the Promise under construction in an async function from the
// catch prediction stack.
CallRuntime(Runtime::kDebugPopPromise, context);
Return(promise);
}
}
} // namespace internal
} // namespace v8

View File

@ -0,0 +1,326 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins-async.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/frames-inl.h"
namespace v8 {
namespace internal {
namespace {
// Describe fields of Context associated with the AsyncIterator unwrap closure.
class ValueUnwrapContext {
public:
enum Fields { kDoneSlot = Context::MIN_CONTEXT_SLOTS, kLength };
};
class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler {
public:
explicit AsyncFromSyncBuiltinsAssembler(CodeAssemblerState* state)
: AsyncBuiltinsAssembler(state) {}
void ThrowIfNotAsyncFromSyncIterator(Node* const context, Node* const object,
Label* if_exception,
Variable* var_exception,
const char* method_name);
typedef std::function<void(Node* const context, Node* const promise,
Label* if_exception)>
UndefinedMethodHandler;
void Generate_AsyncFromSyncIteratorMethod(
Node* const context, Node* const iterator, Node* const sent_value,
Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
const char* operation_name,
Label::Type reject_label_type = Label::kDeferred,
Node* const initial_exception_value = nullptr);
Node* AllocateAsyncIteratorValueUnwrapContext(Node* native_context,
Node* done);
// Load "value" and "done" from an iterator result object. If an exception
// is thrown at any point, jumps to the `if_exception` label with the exception
// stored in `var_exception`.
//
// Returns a Pair of Nodes, whose first element is the value of the "value"
// property, and whose second element is the value of the "done" property,
// converted to a Boolean if needed.
std::pair<Node*, Node*> LoadIteratorResult(Node* const context,
Node* const native_context,
Node* const iter_result,
Label* if_exception,
Variable* var_exception);
Node* CreateUnwrapClosure(Node* const native_context, Node* const done);
};
void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator(
Node* const context, Node* const object, Label* if_exception,
Variable* var_exception, const char* method_name) {
Label if_receiverisincompatible(this, Label::kDeferred), done(this);
GotoIf(TaggedIsSmi(object), &if_receiverisincompatible);
Branch(HasInstanceType(object, JS_ASYNC_FROM_SYNC_ITERATOR_TYPE), &done,
&if_receiverisincompatible);
Bind(&if_receiverisincompatible);
{
// If Type(O) is not Object, or if O does not have a [[SyncIterator]]
// internal slot, then
// Let badIteratorError be a new TypeError exception.
Node* const error =
MakeTypeError(MessageTemplate::kIncompatibleMethodReceiver, context,
CStringConstant(method_name), object);
// Perform ! Call(promiseCapability.[[Reject]], undefined,
// « badIteratorError »).
var_exception->Bind(error);
Goto(if_exception);
}
Bind(&done);
}
void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
Node* const context, Node* const iterator, Node* const sent_value,
Handle<Name> method_name, UndefinedMethodHandler&& if_method_undefined,
const char* operation_name, Label::Type reject_label_type,
Node* const initial_exception_value) {
Node* const native_context = LoadNativeContext(context);
Node* const promise = AllocateAndInitJSPromise(context);
Variable var_exception(this, MachineRepresentation::kTagged,
initial_exception_value == nullptr
? UndefinedConstant()
: initial_exception_value);
Label reject_promise(this, reject_label_type);
ThrowIfNotAsyncFromSyncIterator(context, iterator, &reject_promise,
&var_exception, operation_name);
Node* const sync_iterator =
LoadObjectField(iterator, JSAsyncFromSyncIterator::kSyncIteratorOffset);
Node* const method = GetProperty(context, sync_iterator, method_name);
if (if_method_undefined) {
Label if_isnotundefined(this);
GotoIfNot(IsUndefined(method), &if_isnotundefined);
if_method_undefined(native_context, promise, &reject_promise);
Bind(&if_isnotundefined);
}
Node* const iter_result = CallJS(CodeFactory::Call(isolate()), context,
method, sync_iterator, sent_value);
GotoIfException(iter_result, &reject_promise, &var_exception);
Node* value;
Node* done;
std::tie(value, done) = LoadIteratorResult(
context, native_context, iter_result, &reject_promise, &var_exception);
Node* const wrapper = AllocateAndInitJSPromise(context);
// Perform ! Call(valueWrapperCapability.[[Resolve]], undefined, «
// throwValue »).
InternalResolvePromise(context, wrapper, value);
// Let onFulfilled be a new built-in function object as defined in
// Async Iterator Value Unwrap Functions.
// Set onFulfilled.[[Done]] to throwDone.
Node* const on_fulfilled = CreateUnwrapClosure(native_context, done);
// Perform ! PerformPromiseThen(valueWrapperCapability.[[Promise]],
// onFulfilled, undefined, promiseCapability).
Node* const undefined = UndefinedConstant();
InternalPerformPromiseThen(context, wrapper, on_fulfilled, undefined, promise,
undefined, undefined);
Return(promise);
Bind(&reject_promise);
{
Node* const exception = var_exception.value();
InternalPromiseReject(context, promise, exception, TrueConstant());
Return(promise);
}
}
std::pair<Node*, Node*> AsyncFromSyncBuiltinsAssembler::LoadIteratorResult(
Node* const context, Node* const native_context, Node* const iter_result,
Label* if_exception, Variable* var_exception) {
Label if_fastpath(this), if_slowpath(this), merge(this), to_boolean(this),
done(this), if_notanobject(this, Label::kDeferred);
GotoIf(TaggedIsSmi(iter_result), &if_notanobject);
Node* const iter_result_map = LoadMap(iter_result);
GotoIfNot(IsJSReceiverMap(iter_result_map), &if_notanobject);
Node* const fast_iter_result_map =
LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX);
Variable var_value(this, MachineRepresentation::kTagged);
Variable var_done(this, MachineRepresentation::kTagged);
Branch(WordEqual(iter_result_map, fast_iter_result_map), &if_fastpath,
&if_slowpath);
Bind(&if_fastpath);
{
var_value.Bind(
LoadObjectField(iter_result, JSIteratorResult::kValueOffset));
var_done.Bind(LoadObjectField(iter_result, JSIteratorResult::kDoneOffset));
Goto(&merge);
}
Bind(&if_slowpath);
{
// Let nextValue be IteratorValue(nextResult).
// IfAbruptRejectPromise(nextValue, promiseCapability).
Node* const value =
GetProperty(context, iter_result, factory()->value_string());
GotoIfException(value, if_exception, var_exception);
// Let nextDone be IteratorComplete(nextResult).
// IfAbruptRejectPromise(nextDone, promiseCapability).
Node* const done =
GetProperty(context, iter_result, factory()->done_string());
GotoIfException(done, if_exception, var_exception);
var_value.Bind(value);
var_done.Bind(done);
Goto(&merge);
}
Bind(&if_notanobject);
{
// Sync iterator result is not an object; produce a TypeError and jump
// to the `if_exception` path.
Node* const error = MakeTypeError(
MessageTemplate::kIteratorResultNotAnObject, context, iter_result);
var_exception->Bind(error);
Goto(if_exception);
}
Bind(&merge);
// Ensure `iterResult.done` is a Boolean.
GotoIf(TaggedIsSmi(var_done.value()), &to_boolean);
Branch(IsBoolean(var_done.value()), &done, &to_boolean);
Bind(&to_boolean);
{
Node* const result =
CallStub(CodeFactory::ToBoolean(isolate()), context, var_done.value());
var_done.Bind(result);
Goto(&done);
}
Bind(&done);
return std::make_pair(var_value.value(), var_done.value());
}
Node* AsyncFromSyncBuiltinsAssembler::CreateUnwrapClosure(Node* native_context,
Node* done) {
Node* const map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
Node* const on_fulfilled_shared = LoadContextElement(
native_context, Context::ASYNC_ITERATOR_VALUE_UNWRAP_SHARED_FUN);
CSA_ASSERT(this,
HasInstanceType(on_fulfilled_shared, SHARED_FUNCTION_INFO_TYPE));
Node* const closure_context =
AllocateAsyncIteratorValueUnwrapContext(native_context, done);
return AllocateFunctionWithMapAndContext(map, on_fulfilled_shared,
closure_context);
}
Node* AsyncFromSyncBuiltinsAssembler::AllocateAsyncIteratorValueUnwrapContext(
Node* native_context, Node* done) {
CSA_ASSERT(this, IsNativeContext(native_context));
CSA_ASSERT(this, IsBoolean(done));
Node* const context =
CreatePromiseContext(native_context, ValueUnwrapContext::kLength);
StoreContextElementNoWriteBarrier(context, ValueUnwrapContext::kDoneSlot,
done);
return context;
}
} // namespace
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-%asyncfromsynciteratorprototype%.next
TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) {
Node* const iterator = Parameter(0);
Node* const value = Parameter(1);
Node* const context = Parameter(4);
Generate_AsyncFromSyncIteratorMethod(
context, iterator, value, factory()->next_string(),
UndefinedMethodHandler(), "[Async-from-Sync Iterator].prototype.next");
}
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-%asyncfromsynciteratorprototype%.return
TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn,
AsyncFromSyncBuiltinsAssembler) {
Node* const iterator = Parameter(0);
Node* const value = Parameter(1);
Node* const context = Parameter(4);
auto if_return_undefined = [=](Node* const native_context,
Node* const promise, Label* if_exception) {
// If return is undefined, then
// Let iterResult be ! CreateIterResultObject(value, true)
Node* const iter_result =
CallStub(CodeFactory::CreateIterResultObject(isolate()), context, value,
TrueConstant());
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « iterResult »).
// IfAbruptRejectPromise(nextDone, promiseCapability).
// Return promiseCapability.[[Promise]].
PromiseFulfill(context, promise, iter_result, v8::Promise::kFulfilled);
Return(promise);
};
Generate_AsyncFromSyncIteratorMethod(
context, iterator, value, factory()->return_string(), if_return_undefined,
"[Async-from-Sync Iterator].prototype.return");
}
// https://tc39.github.io/proposal-async-iteration/
// Section #sec-%asyncfromsynciteratorprototype%.throw
TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow,
AsyncFromSyncBuiltinsAssembler) {
Node* const iterator = Parameter(0);
Node* const reason = Parameter(1);
Node* const context = Parameter(4);
auto if_throw_undefined = [=](Node* const native_context, Node* const promise,
Label* if_exception) { Goto(if_exception); };
Generate_AsyncFromSyncIteratorMethod(
context, iterator, reason, factory()->throw_string(), if_throw_undefined,
"[Async-from-Sync Iterator].prototype.throw", Label::kNonDeferred,
reason);
}
TF_BUILTIN(AsyncIteratorValueUnwrap, AsyncFromSyncBuiltinsAssembler) {
Node* const value = Parameter(1);
Node* const context = Parameter(4);
Node* const done = LoadContextElement(context, ValueUnwrapContext::kDoneSlot);
CSA_ASSERT(this, IsBoolean(done));
Node* const unwrapped_value = CallStub(
CodeFactory::CreateIterResultObject(isolate()), context, value, done);
Return(unwrapped_value);
}
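// Illustration (assumption, not part of the change): these builtins back
// for-await-of over an ordinary (sync) iterable:
//   async function f(xs) {
//     for await (const x of xs) console.log(x);
//   }
// Each sync {value, done} result is wrapped in a resolved promise and then
// unwrapped by AsyncIteratorValueUnwrap using the recorded `done` flag.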
} // namespace internal
} // namespace v8

92
deps/v8/src/builtins/builtins-async.cc vendored Normal file
View File

@ -0,0 +1,92 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins-async.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/frames-inl.h"
namespace v8 {
namespace internal {
Node* AsyncBuiltinsAssembler::Await(
Node* context, Node* generator, Node* value, Node* outer_promise,
const NodeGenerator1& create_closure_context, int on_resolve_context_index,
int on_reject_context_index, bool is_predicted_as_caught) {
// Let promiseCapability be ! NewPromiseCapability(%Promise%).
Node* const wrapped_value = AllocateAndInitJSPromise(context);
// Perform ! Call(promiseCapability.[[Resolve]], undefined, « promise »).
InternalResolvePromise(context, wrapped_value, value);
Node* const native_context = LoadNativeContext(context);
Node* const closure_context = create_closure_context(native_context);
Node* const map = LoadContextElement(
native_context, Context::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX);
// Load and allocate on_resolve closure
Node* const on_resolve_shared_fun =
LoadContextElement(native_context, on_resolve_context_index);
CSA_SLOW_ASSERT(
this, HasInstanceType(on_resolve_shared_fun, SHARED_FUNCTION_INFO_TYPE));
Node* const on_resolve = AllocateFunctionWithMapAndContext(
map, on_resolve_shared_fun, closure_context);
// Load and allocate on_reject closure
Node* const on_reject_shared_fun =
LoadContextElement(native_context, on_reject_context_index);
CSA_SLOW_ASSERT(
this, HasInstanceType(on_reject_shared_fun, SHARED_FUNCTION_INFO_TYPE));
Node* const on_reject = AllocateFunctionWithMapAndContext(
map, on_reject_shared_fun, closure_context);
Node* const throwaway_promise =
AllocateAndInitJSPromise(context, wrapped_value);
// The Promise will be thrown away and not handled, but it shouldn't trigger
// unhandled reject events, as its work is done.
PromiseSetHasHandler(throwaway_promise);
Label do_perform_promise_then(this);
GotoIfNot(IsDebugActive(), &do_perform_promise_then);
{
Label common(this);
GotoIf(TaggedIsSmi(value), &common);
GotoIfNot(HasInstanceType(value, JS_PROMISE_TYPE), &common);
{
// Mark the reject handler callback to be a forwarding edge, rather
// than a meaningful catch handler
Node* const key =
HeapConstant(factory()->promise_forwarding_handler_symbol());
CallRuntime(Runtime::kSetProperty, context, on_reject, key,
TrueConstant(), SmiConstant(STRICT));
if (is_predicted_as_caught) PromiseSetHandledHint(value);
}
Goto(&common);
Bind(&common);
// Mark the dependency to outer Promise in case the throwaway Promise is
// found on the Promise stack
CSA_SLOW_ASSERT(this, HasInstanceType(outer_promise, JS_PROMISE_TYPE));
Node* const key = HeapConstant(factory()->promise_handled_by_symbol());
CallRuntime(Runtime::kSetProperty, context, throwaway_promise, key,
outer_promise, SmiConstant(STRICT));
}
Goto(&do_perform_promise_then);
Bind(&do_perform_promise_then);
InternalPerformPromiseThen(context, wrapped_value, on_resolve, on_reject,
throwaway_promise, UndefinedConstant(),
UndefinedConstant());
return wrapped_value;
}
} // namespace internal
} // namespace v8

35
deps/v8/src/builtins/builtins-async.h vendored Normal file
View File

@ -0,0 +1,35 @@
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BUILTINS_BUILTINS_ASYNC_H_
#define V8_BUILTINS_BUILTINS_ASYNC_H_
#include "src/builtins/builtins-promise.h"
namespace v8 {
namespace internal {
class AsyncBuiltinsAssembler : public PromiseBuiltinsAssembler {
public:
explicit AsyncBuiltinsAssembler(CodeAssemblerState* state)
: PromiseBuiltinsAssembler(state) {}
protected:
typedef std::function<Node*(Node*)> NodeGenerator1;
// Perform steps to resume generator after `value` is resolved.
// `on_reject_context_index` is an index into the Native Context, which should
// point to a SharedFunctionInfo instance used to create the closure. The
// value following the reject index should be a similar value for the resolve
// closure. Returns the Promise-wrapped `value`.
Node* Await(Node* context, Node* generator, Node* value, Node* outer_promise,
const NodeGenerator1& create_closure_context,
int on_resolve_context_index, int on_reject_context_index,
bool is_predicted_as_caught);
};
} // namespace internal
} // namespace v8
#endif // V8_BUILTINS_BUILTINS_ASYNC_H_

View File

@ -5,6 +5,8 @@
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/code-stub-assembler.h"
#include "src/counters.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {

View File

@ -2,8 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@ -147,5 +150,14 @@ void Builtins::Generate_TailCall_ReceiverIsAny(MacroAssembler* masm) {
Generate_Call(masm, ConvertReceiverMode::kAny, TailCallMode::kAllow);
}
void Builtins::Generate_CallForwardVarargs(MacroAssembler* masm) {
Generate_CallForwardVarargs(masm, masm->isolate()->builtins()->Call());
}
void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) {
Generate_CallForwardVarargs(masm,
masm->isolate()->builtins()->CallFunction());
}
} // namespace internal
} // namespace v8

View File

@ -5,6 +5,8 @@
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
#include "src/counters.h"
#include "src/objects-inl.h"
#include "src/string-builder.h"
#include "src/wasm/wasm-module.h"

View File

@ -8,7 +8,9 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/counters.h"
#include "src/interface-descriptors.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@ -47,7 +49,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
Node* is_not_normal =
Word32And(compiler_hints,
Int32Constant(SharedFunctionInfo::kAllFunctionKindBitsMask));
GotoUnless(is_not_normal, &if_normal);
GotoIfNot(is_not_normal, &if_normal);
Node* is_generator = Word32And(
compiler_hints, Int32Constant(FunctionKind::kGeneratorFunction
@ -120,13 +122,34 @@ Node* ConstructorBuiltinsAssembler::EmitFastNewClosure(Node* shared_info,
// Initialize the rest of the function.
Node* empty_fixed_array = HeapConstant(factory->empty_fixed_array());
Node* empty_literals_array = HeapConstant(factory->empty_literals_array());
StoreObjectFieldNoWriteBarrier(result, JSObject::kPropertiesOffset,
empty_fixed_array);
StoreObjectFieldNoWriteBarrier(result, JSObject::kElementsOffset,
empty_fixed_array);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kLiteralsOffset,
empty_literals_array);
Node* literals_cell = LoadFixedArrayElement(
feedback_vector, slot, 0, CodeStubAssembler::SMI_PARAMETERS);
{
// Bump the closure counter encoded in the cell's map.
Node* cell_map = LoadMap(literals_cell);
Label no_closures(this), one_closure(this), cell_done(this);
GotoIf(IsNoClosuresCellMap(cell_map), &no_closures);
GotoIf(IsOneClosureCellMap(cell_map), &one_closure);
CSA_ASSERT(this, IsManyClosuresCellMap(cell_map));
Goto(&cell_done);
Bind(&no_closures);
StoreMapNoWriteBarrier(literals_cell, Heap::kOneClosureCellMapRootIndex);
Goto(&cell_done);
Bind(&one_closure);
StoreMapNoWriteBarrier(literals_cell, Heap::kManyClosuresCellMapRootIndex);
Goto(&cell_done);
Bind(&cell_done);
}
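// Note (assumption): the cell's map encodes a small closure counter,
// NoClosures -> OneClosure -> ManyClosures, saturating at "many", so later
// optimization decisions can read it without a separate counter field.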
StoreObjectFieldNoWriteBarrier(result, JSFunction::kFeedbackVectorOffset,
literals_cell);
StoreObjectFieldNoWriteBarrier(
result, JSFunction::kPrototypeOrInitialMapOffset, TheHoleConstant());
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
@ -400,10 +423,9 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneRegExp(Node* closure,
Variable result(this, MachineRepresentation::kTagged);
Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* boilerplate =
LoadFixedArrayElement(literals_array, literal_index,
LiteralsArray::kFirstLiteralIndex * kPointerSize,
Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
Node* boilerplate = LoadFixedArrayElement(feedback_vector, literal_index, 0,
CodeStubAssembler::SMI_PARAMETERS);
GotoIf(IsUndefined(boilerplate), &call_runtime);
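// Note (assumption): literal boilerplates now live directly in the
// function's feedback vector, reached through the kFeedbackVectorOffset
// cell, so the old LiteralsArray::kFirstLiteralIndex bias is gone.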
@ -484,16 +506,13 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowArray(
return_result(this);
Variable result(this, MachineRepresentation::kTagged);
Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* allocation_site =
LoadFixedArrayElement(literals_array, literal_index,
LiteralsArray::kFirstLiteralIndex * kPointerSize,
CodeStubAssembler::SMI_PARAMETERS);
Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
Node* allocation_site = LoadFixedArrayElement(
feedback_vector, literal_index, 0, CodeStubAssembler::SMI_PARAMETERS);
GotoIf(IsUndefined(allocation_site), call_runtime);
allocation_site =
LoadFixedArrayElement(literals_array, literal_index,
LiteralsArray::kFirstLiteralIndex * kPointerSize,
allocation_site = LoadFixedArrayElement(feedback_vector, literal_index, 0,
CodeStubAssembler::SMI_PARAMETERS);
Node* boilerplate =
@ -645,11 +664,10 @@ int ConstructorBuiltinsAssembler::FastCloneShallowObjectPropertiesCount(
Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
CodeAssemblerLabel* call_runtime, Node* closure, Node* literals_index,
Node* properties_count) {
Node* literals_array = LoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* allocation_site =
LoadFixedArrayElement(literals_array, literals_index,
LiteralsArray::kFirstLiteralIndex * kPointerSize,
CodeStubAssembler::SMI_PARAMETERS);
Node* cell = LoadObjectField(closure, JSFunction::kFeedbackVectorOffset);
Node* feedback_vector = LoadObjectField(cell, Cell::kValueOffset);
Node* allocation_site = LoadFixedArrayElement(
feedback_vector, literals_index, 0, CodeStubAssembler::SMI_PARAMETERS);
GotoIf(IsUndefined(allocation_site), call_runtime);
// Calculate the object and allocation size based on the properties count.
@ -665,7 +683,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
Node* boilerplate_map = LoadMap(boilerplate);
Node* instance_size = LoadMapInstanceSize(boilerplate_map);
Node* size_in_words = WordShr(object_size, kPointerSizeLog2);
GotoUnless(WordEqual(instance_size, size_in_words), call_runtime);
GotoIfNot(WordEqual(instance_size, size_in_words), call_runtime);
Node* copy = Allocate(allocation_size);
@ -689,8 +707,7 @@ Node* ConstructorBuiltinsAssembler::EmitFastCloneShallowObject(
Bind(&loop_check);
{
offset.Bind(IntPtrAdd(offset.value(), IntPtrConstant(kPointerSize)));
GotoUnless(IntPtrGreaterThanOrEqual(offset.value(), end_offset),
&loop_body);
GotoIfNot(IntPtrGreaterThanOrEqual(offset.value(), end_offset), &loop_body);
}
if (FLAG_allocation_site_pretenuring) {

View File

@ -6,6 +6,7 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@ -24,6 +25,7 @@ Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
}
namespace {
// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] )
void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
ToPrimitiveHint hint) {
@ -52,7 +54,8 @@ void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
{
// Invoke the {exotic_to_prim} method on the {input} with a string
// representation of the {hint}.
Callable callable = CodeFactory::Call(assembler->isolate());
Callable callable = CodeFactory::Call(
assembler->isolate(), ConvertReceiverMode::kNotNullOrUndefined);
Node* hint_string = assembler->HeapConstant(
assembler->factory()->ToPrimitiveHintString(hint));
Node* result = assembler->CallJS(callable, context, exotic_to_prim, input,
@ -93,7 +96,8 @@ void Generate_NonPrimitiveToPrimitive(CodeStubAssembler* assembler,
assembler->TailCallStub(callable, context, input);
}
}
} // anonymous namespace
} // namespace
void Builtins::Generate_NonPrimitiveToPrimitive_Default(
compiler::CodeAssemblerState* state) {
@ -177,7 +181,7 @@ void Builtins::Generate_ToString(compiler::CodeAssemblerState* state) {
Node* input_instance_type = assembler.LoadMapInstanceType(input_map);
Label not_string(&assembler);
assembler.GotoUnless(assembler.IsStringInstanceType(input_instance_type),
assembler.GotoIfNot(assembler.IsStringInstanceType(input_instance_type),
&not_string);
assembler.Return(input);
@ -185,8 +189,7 @@ void Builtins::Generate_ToString(compiler::CodeAssemblerState* state) {
assembler.Bind(&not_string);
{
assembler.GotoUnless(assembler.IsHeapNumberMap(input_map),
&not_heap_number);
assembler.GotoIfNot(assembler.IsHeapNumberMap(input_map), &not_heap_number);
assembler.Goto(&is_number);
}
@ -221,6 +224,7 @@ Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
}
namespace {
// 7.1.1.1 OrdinaryToPrimitive ( O, hint )
void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
OrdinaryToPrimitiveHint hint) {
@ -263,7 +267,8 @@ void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
assembler->Bind(&if_methodiscallable);
{
// Call the {method} on the {input}.
Callable callable = CodeFactory::Call(assembler->isolate());
Callable callable = CodeFactory::Call(
assembler->isolate(), ConvertReceiverMode::kNotNullOrUndefined);
Node* result = assembler->CallJS(callable, context, method, input);
var_result.Bind(result);
@ -287,7 +292,8 @@ void Generate_OrdinaryToPrimitive(CodeStubAssembler* assembler,
assembler->Bind(&return_result);
assembler->Return(var_result.value());
}
} // anonymous namespace
} // namespace
void Builtins::Generate_OrdinaryToPrimitive_Number(
compiler::CodeAssemblerState* state) {
@ -361,7 +367,7 @@ void Builtins::Generate_ToLength(compiler::CodeAssemblerState* state) {
Node* len_value = assembler.LoadHeapNumberValue(len);
// Check if {len} is not greater than zero.
assembler.GotoUnless(assembler.Float64GreaterThan(
assembler.GotoIfNot(assembler.Float64GreaterThan(
len_value, assembler.Float64Constant(0.0)),
&return_zero);
@ -474,6 +480,17 @@ void Builtins::Generate_ToObject(compiler::CodeAssemblerState* state) {
assembler.Return(object);
}
// Deprecated ES5 [[Class]] internal property (used to implement %_ClassOf).
void Builtins::Generate_ClassOf(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;
typedef TypeofDescriptor Descriptor;
CodeStubAssembler assembler(state);
Node* object = assembler.Parameter(Descriptor::kObject);
assembler.Return(assembler.ClassOf(object));
}
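// Illustration (assumption): %_ClassOf is the intrinsic form of the legacy
// [[Class]] check, usable with --allow-natives-syntax:
//   %_ClassOf([1, 2]);     // "Array"
//   %_ClassOf(new Date()); // "Date"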
// ES6 section 12.5.5 typeof operator
void Builtins::Generate_Typeof(compiler::CodeAssemblerState* state) {
typedef compiler::Node Node;

View File

@ -2,8 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"
#include "src/builtins/builtins.h"
#include "src/conversions.h"
#include "src/counters.h"
#include "src/factory.h"
#include "src/isolate.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@ -42,8 +47,7 @@ BUILTIN(DataViewConstructor_ConstructStub) {
Handle<Object> offset;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, offset,
Object::ToIndex(isolate, byte_offset,
MessageTemplate::kInvalidDataViewOffset));
Object::ToIndex(isolate, byte_offset, MessageTemplate::kInvalidOffset));
// 5. If IsDetachedBuffer(buffer) is true, throw a TypeError exception.
// We currently violate the specification at this point.
@ -55,8 +59,7 @@ BUILTIN(DataViewConstructor_ConstructStub) {
// 7. If offset > bufferByteLength, throw a RangeError exception
if (offset->Number() > buffer_byte_length) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewRangeError(MessageTemplate::kInvalidDataViewOffset, offset));
isolate, NewRangeError(MessageTemplate::kInvalidOffset, offset));
}
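// Illustration (assumption): both failure modes now raise the shared
// kInvalidOffset RangeError, e.g.
//   new DataView(new ArrayBuffer(8), -1);  // RangeError from ToIndex
//   new DataView(new ArrayBuffer(8), 16);  // RangeError: offset > length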
Handle<Object> view_byte_length;

View File

@ -6,7 +6,10 @@
#include "src/builtins/builtins.h"
#include "src/code-factory.h"
#include "src/code-stub-assembler.h"
#include "src/conversions.h"
#include "src/counters.h"
#include "src/dateparser-inl.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
@ -302,8 +305,8 @@ BUILTIN(DateUTC) {
HandleScope scope(isolate);
int const argc = args.length() - 1;
double year = std::numeric_limits<double>::quiet_NaN();
double month = std::numeric_limits<double>::quiet_NaN();
double date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0, ms = 0.0;
double month = 0.0, date = 1.0, hours = 0.0, minutes = 0.0, seconds = 0.0,
ms = 0.0;
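// Behavioral note (illustrative): with `month` defaulting to 0.0 rather
// than NaN, a one-argument call now yields a valid timestamp:
//   Date.UTC(2017);  // Jan 1, 2017 00:00:00 UTC, previously NaN
// matching the ES2017 change that makes only `year` required.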
if (argc >= 1) {
Handle<Object> year_object;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, year_object,
@ -945,8 +948,8 @@ void Generate_DatePrototype_GetField(CodeStubAssembler* assembler,
// Raise a TypeError if the receiver is not a date.
assembler->Bind(&receiver_not_date);
{
Node* result = assembler->CallRuntime(Runtime::kThrowNotDateError, context);
assembler->Return(result);
assembler->CallRuntime(Runtime::kThrowNotDateError, context);
assembler->Unreachable();
}
}
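// Note (assumption): kThrowNotDateError never returns, so the branch ends
// in Unreachable() instead of returning the runtime call's result; the
// assembler can then verify that no value flows out of this path.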
@ -1099,7 +1102,7 @@ void Builtins::Generate_DatePrototypeToPrimitive(
// Check if the {receiver} is actually a JSReceiver.
Label receiver_is_invalid(&assembler, Label::kDeferred);
assembler.GotoIf(assembler.TaggedIsSmi(receiver), &receiver_is_invalid);
assembler.GotoUnless(assembler.IsJSReceiver(receiver), &receiver_is_invalid);
assembler.GotoIfNot(assembler.IsJSReceiver(receiver), &receiver_is_invalid);
// Dispatch to the appropriate OrdinaryToPrimitive builtin.
Label hint_is_number(&assembler), hint_is_string(&assembler),
@ -1116,7 +1119,7 @@ void Builtins::Generate_DatePrototypeToPrimitive(
// Slow-case with actual string comparisons.
Callable string_equal = CodeFactory::StringEqual(assembler.isolate());
assembler.GotoIf(assembler.TaggedIsSmi(hint), &hint_is_invalid);
assembler.GotoUnless(assembler.IsString(hint), &hint_is_invalid);
assembler.GotoIfNot(assembler.IsString(hint), &hint_is_invalid);
assembler.GotoIf(assembler.WordEqual(assembler.CallStub(string_equal, context,
hint, number_string),
assembler.TrueConstant()),
@ -1152,20 +1155,19 @@ void Builtins::Generate_DatePrototypeToPrimitive(
// Raise a TypeError if the {hint} is invalid.
assembler.Bind(&hint_is_invalid);
{
Node* result =
assembler.CallRuntime(Runtime::kThrowInvalidHint, context, hint);
assembler.Return(result);
assembler.Unreachable();
}
// Raise a TypeError if the {receiver} is not a JSReceiver instance.
assembler.Bind(&receiver_is_invalid);
{
Node* result = assembler.CallRuntime(
assembler.CallRuntime(
Runtime::kThrowIncompatibleMethodReceiver, context,
assembler.HeapConstant(assembler.factory()->NewStringFromAsciiChecked(
"Date.prototype [ @@toPrimitive ]", TENURED)),
receiver);
assembler.Return(result);
assembler.Unreachable();
}
}

Some files were not shown because too many files have changed in this diff