deps: update V8 to 6.4.388.40

PR-URL: https://github.com/nodejs/node/pull/17489
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Author: Michaël Zasso, 2018-01-24 20:16:06 +01:00 (committed by Myles Borins)
parent fa9f31a4fd
commit 4c4af643e5
2123 changed files with 93006 additions and 80472 deletions

deps/v8/.gitignore vendored

@ -70,6 +70,8 @@
!/third_party/binutils
!/third_party/eu-strip
!/third_party/inspector_protocol
!/third_party/colorama
/third_party/colorama/src
/tools/clang
/tools/gcmole/gcmole-tools
/tools/gcmole/gcmole-tools.tar.gz
@ -103,5 +105,6 @@ turbo*.cfg
turbo*.dot
turbo*.json
v8.ignition_dispatches_table.json
/Default/
!/third_party/jinja2
!/third_party/markupsafe

deps/v8/.vpython vendored (new file)

@ -0,0 +1,32 @@
# This is a vpython "spec" file.
#
# It describes patterns for python wheel dependencies of the python scripts in
# the chromium repo, particularly for dependencies that have compiled components
# (since pure-python dependencies can be easily vendored into third_party).
#
# When vpython is invoked, it finds this file and builds a python VirtualEnv,
# containing all of the dependencies described in this file, fetching them from
# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`,
# this never requires the end-user machine to have a working python extension
# compilation environment. All of these packages are built using:
# https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/
#
# All python scripts in the repo share this same spec, to avoid dependency
# fragmentation.
#
# If you have depot_tools installed in your $PATH, you can invoke python scripts
# in this repo by running them as you normally would run them, except
# substituting `vpython` instead of `python` on the command line, e.g.:
# vpython path/to/script.py some --arguments
#
# Read more about `vpython` and how to modify this file here:
# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md
python_version: "2.7"
# Needed by third_party/catapult/devil/devil, which is imported by
# build/android/test_runner.py when running performance tests.
wheel: <
name: "infra/python/wheels/psutil/${vpython_platform}"
version: "version:5.2.2"
>


@ -42,7 +42,7 @@ import sys
# Flags from YCM's default config.
flags = [
'-DUSE_CLANG_COMPLETER',
'-std=gnu++11',
'-std=gnu++14',
'-x',
'c++',
]

deps/v8/AUTHORS vendored

@ -1,4 +1,4 @@
# Below is a list of people and organizations that have contributed
# Below is a list of people and organizations that have contributed
# to the V8 project. Names should be added to the list like so:
#
# Name/Organization <email address>
@ -31,6 +31,7 @@ StrongLoop, Inc. <*@strongloop.com>
Facebook, Inc. <*@fb.com>
Facebook, Inc. <*@oculus.com>
Vewd Software AS <*@vewd.com>
Groupon <*@groupon.com>
Aaron Bieber <deftly@gmail.com>
Abdulla Kamar <abdulla.kamar@gmail.com>
@ -45,6 +46,7 @@ Andrew Paprocki <andrew@ishiboo.com>
Andrei Kashcha <anvaka@gmail.com>
Anna Henningsen <anna@addaleax.net>
Bangfu Tao <bangfu.tao@samsung.com>
Ben Coe <ben@npmjs.com>
Ben Noordhuis <info@bnoordhuis.nl>
Benjamin Tan <demoneaux@gmail.com>
Bert Belder <bertbelder@gmail.com>
@ -54,6 +56,7 @@ Craig Schlenter <craig.schlenter@gmail.com>
Choongwoo Han <cwhan.tunz@gmail.com>
Chris Nardi <hichris123@gmail.com>
Christopher A. Taylor <chris@gameclosure.com>
Colin Ihrig <cjihrig@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel Bevenius <daniel.bevenius@gmail.com>
Daniel James <dnljms@gmail.com>
@ -75,6 +78,7 @@ Ioseb Dzmanashvili <ioseb.dzmanashvili@gmail.com>
Isiah Meadows <impinball@gmail.com>
Jaime Bernardo <jaime@janeasystems.com>
Jan de Mooij <jandemooij@gmail.com>
Jan Krems <jan.krems@gmail.com>
Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
@ -86,6 +90,7 @@ JunHo Seo <sejunho@gmail.com>
Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
Karl Skomski <karl@skomski.com>
Kevin Gibbons <bakkot@gmail.com>
Kris Selden <kris.selden@gmail.com>
Loo Rong Jie <loorongjie@gmail.com>
Luis Reis <luis.m.reis@gmail.com>
Luke Zarko <lukezarko@gmail.com>
@ -127,12 +132,14 @@ Sandro Santilli <strk@keybit.net>
Sanjoy Das <sanjoy@playingwithpointers.com>
Seo Sanghyeon <sanxiyn@gmail.com>
Stefan Penner <stefan.penner@gmail.com>
Sylvestre Ledru <sledru@mozilla.com>
Tobias Burnus <burnus@net-b.de>
Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
Yong Wang <ccyongwang@tencent.com>
Yu Yin <xwafish@gmail.com>
Zac Hansen <xaxxon@gmail.com>
Zhongping Wang <kewpie.w.zp@gmail.com>

deps/v8/BUILD.gn vendored

@ -83,11 +83,11 @@ declare_args() {
# Sets -dV8_TRACE_IGNITION.
v8_enable_trace_ignition = false
# Sets -dV8_CONCURRENT_MARKING
v8_enable_concurrent_marking = false
# Sets -dV8_TRACE_FEEDBACK_UPDATES.
v8_enable_trace_feedback_updates = false
# Sets -dV8_CSA_WRITE_BARRIER
v8_enable_csa_write_barrier = true
# Sets -dV8_CONCURRENT_MARKING
v8_enable_concurrent_marking = true
# Build the snapshot with unwinding information for perf.
# Sets -dV8_USE_SNAPSHOT_WITH_UNWINDING_INFO.
@ -132,6 +132,11 @@ declare_args() {
# Temporary flag to allow embedders to update their microtasks scopes
# while rolling in a new version of V8.
v8_check_microtasks_scopes_consistency = ""
v8_monolithic = false
# Enable mitigations for executing untrusted code.
v8_untrusted_code_mitigations = true
}
# Derived defaults.
@ -270,6 +275,9 @@ config("features") {
if (v8_enable_trace_ignition) {
defines += [ "V8_TRACE_IGNITION" ]
}
if (v8_enable_trace_feedback_updates) {
defines += [ "V8_TRACE_FEEDBACK_UPDATES" ]
}
if (v8_enable_v8_checks) {
defines += [ "V8_ENABLE_CHECKS" ]
}
@ -300,9 +308,6 @@ config("features") {
if (v8_enable_concurrent_marking) {
defines += [ "V8_CONCURRENT_MARKING" ]
}
if (v8_enable_csa_write_barrier) {
defines += [ "V8_CSA_WRITE_BARRIER" ]
}
if (v8_check_microtasks_scopes_consistency) {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
@ -488,6 +493,10 @@ config("toolchain") {
defines += [ "ENABLE_VERIFY_CSA" ]
}
if (!v8_untrusted_code_mitigations) {
defines += [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ]
}
if (v8_no_inline) {
cflags += [
"-fno-inline-functions",
@ -568,9 +577,7 @@ action("js2c") {
"src/js/prologue.js",
"src/js/v8natives.js",
"src/js/array.js",
"src/js/string.js",
"src/js/typedarray.js",
"src/js/weak-collection.js",
"src/js/messages.js",
"src/js/spread.js",
"src/js/proxy.js",
@ -746,6 +753,12 @@ action("postmortem-metadata") {
sources = [
"src/objects.h",
"src/objects-inl.h",
"src/objects/code-inl.h",
"src/objects/code.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp.h",
"src/objects/map.h",
"src/objects/map-inl.h",
"src/objects/script.h",
@ -764,65 +777,68 @@ action("postmortem-metadata") {
rebase_path(sources, root_build_dir)
}
action("run_mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
if (v8_use_snapshot) {
action("run_mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":mksnapshot($v8_snapshot_toolchain)",
]
script = "tools/run.py"
sources = []
outputs = [
"$target_gen_dir/snapshot.cc",
]
args = [
"./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)",
"root_out_dir") + "/mksnapshot",
root_build_dir),
"--startup_src",
rebase_path("$target_gen_dir/snapshot.cc", root_build_dir),
]
if (v8_random_seed != "0") {
args += [
"--random-seed",
v8_random_seed,
deps = [
":mksnapshot($v8_snapshot_toolchain)",
]
}
if (v8_os_page_size != "0") {
args += [
"--v8_os_page_size",
v8_os_page_size,
script = "tools/run.py"
sources = []
outputs = [
"$target_gen_dir/snapshot.cc",
]
}
if (v8_perf_prof_unwinding_info) {
args += [ "--perf-prof-unwinding-info" ]
}
if (v8_use_external_startup_data) {
outputs += [ "$root_out_dir/snapshot_blob.bin" ]
args += [
"--startup_blob",
rebase_path("$root_out_dir/snapshot_blob.bin", root_build_dir),
args = [
"./" + rebase_path(get_label_info(":mksnapshot($v8_snapshot_toolchain)",
"root_out_dir") + "/mksnapshot",
root_build_dir),
"--turbo_instruction_scheduling",
"--startup_src",
rebase_path("$target_gen_dir/snapshot.cc", root_build_dir),
]
}
if (v8_embed_script != "") {
sources += [ v8_embed_script ]
args += [ rebase_path(v8_embed_script, root_build_dir) ]
}
if (v8_random_seed != "0") {
args += [
"--random-seed",
v8_random_seed,
]
}
if (v8_enable_fast_mksnapshot) {
args += [
"--no-turbo-rewrite-far-jumps",
"--no-turbo-verify-allocation",
]
if (v8_os_page_size != "0") {
args += [
"--v8_os_page_size",
v8_os_page_size,
]
}
if (v8_perf_prof_unwinding_info) {
args += [ "--perf-prof-unwinding-info" ]
}
if (v8_use_external_startup_data) {
outputs += [ "$root_out_dir/snapshot_blob.bin" ]
args += [
"--startup_blob",
rebase_path("$root_out_dir/snapshot_blob.bin", root_build_dir),
]
}
if (v8_embed_script != "") {
sources += [ v8_embed_script ]
args += [ rebase_path(v8_embed_script, root_build_dir) ]
}
if (v8_enable_fast_mksnapshot) {
args += [
"--no-turbo-rewrite-far-jumps",
"--no-turbo-verify-allocation",
]
}
}
}
@ -834,6 +850,7 @@ action("v8_dump_build_config") {
is_gcov_coverage = v8_code_coverage && !is_clang
args = [
rebase_path("$root_out_dir/v8_build_config.json", root_build_dir),
"current_cpu=\"$current_cpu\"",
"dcheck_always_on=$dcheck_always_on",
"is_asan=$is_asan",
"is_cfi=$is_cfi",
@ -844,7 +861,9 @@ action("v8_dump_build_config") {
"is_tsan=$is_tsan",
"is_ubsan_vptr=$is_ubsan_vptr",
"target_cpu=\"$target_cpu\"",
"v8_current_cpu=\"$v8_current_cpu\"",
"v8_enable_i18n_support=$v8_enable_i18n_support",
"v8_enable_verify_predictable=$v8_enable_verify_predictable",
"v8_target_cpu=\"$v8_target_cpu\"",
"v8_use_snapshot=$v8_use_snapshot",
]
@ -901,44 +920,46 @@ v8_source_set("v8_nosnapshot") {
configs = [ ":internal_config" ]
}
v8_source_set("v8_snapshot") {
# Only targets in this file and the top-level visibility target can
# depend on this.
visibility = [
":*",
"//:gn_visibility",
]
deps = [
":js2c",
":js2c_experimental_extras",
":js2c_extras",
":v8_base",
]
public_deps = [
# This should be public so downstream targets can declare the snapshot
# output file as their inputs.
":run_mksnapshot",
]
sources = [
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/libraries.cc",
"$target_gen_dir/snapshot.cc",
"src/setup-isolate-deserialize.cc",
]
if (use_jumbo_build == true) {
jumbo_excluded_sources = [
# TODO(mostynb@opera.com): don't exclude these http://crbug.com/752428
# Generated source, contains same variable names as libraries.cc
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/libraries.cc",
if (v8_use_snapshot) {
v8_source_set("v8_snapshot") {
# Only targets in this file and the top-level visibility target can
# depend on this.
visibility = [
":*",
"//:gn_visibility",
]
}
configs = [ ":internal_config" ]
deps = [
":js2c",
":js2c_experimental_extras",
":js2c_extras",
":v8_base",
]
public_deps = [
# This should be public so downstream targets can declare the snapshot
# output file as their inputs.
":run_mksnapshot",
]
sources = [
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/extras-libraries.cc",
"$target_gen_dir/libraries.cc",
"$target_gen_dir/snapshot.cc",
"src/setup-isolate-deserialize.cc",
]
if (use_jumbo_build == true) {
jumbo_excluded_sources = [
# TODO(mostynb@opera.com): don't exclude these http://crbug.com/752428
# Generated source, contains same variable names as libraries.cc
"$target_gen_dir/experimental-extras-libraries.cc",
"$target_gen_dir/libraries.cc",
]
}
configs = [ ":internal_config" ]
}
}
if (v8_use_external_startup_data) {
@ -1008,12 +1029,14 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-iterator-gen.cc",
"src/builtins/builtins-iterator-gen.h",
"src/builtins/builtins-math-gen.cc",
"src/builtins/builtins-math-gen.h",
"src/builtins/builtins-number-gen.cc",
"src/builtins/builtins-object-gen.cc",
"src/builtins/builtins-promise-gen.cc",
"src/builtins/builtins-promise-gen.h",
"src/builtins/builtins-proxy-gen.cc",
"src/builtins/builtins-proxy-gen.h",
"src/builtins/builtins-reflect-gen.cc",
"src/builtins/builtins-regexp-gen.cc",
"src/builtins/builtins-regexp-gen.h",
"src/builtins/builtins-sharedarraybuffer-gen.cc",
@ -1195,8 +1218,6 @@ v8_source_set("v8_base") {
"src/assembler.h",
"src/assert-scope.cc",
"src/assert-scope.h",
"src/ast/ast-expression-rewriter.cc",
"src/ast/ast-expression-rewriter.h",
"src/ast/ast-function-literal-id-reindexer.cc",
"src/ast/ast-function-literal-id-reindexer.h",
"src/ast/ast-numbering.cc",
@ -1219,8 +1240,6 @@ v8_source_set("v8_base") {
"src/ast/scopes.h",
"src/ast/variables.cc",
"src/ast/variables.h",
"src/background-parsing-task.cc",
"src/background-parsing-task.h",
"src/bailout-reason.cc",
"src/bailout-reason.h",
"src/basic-block-profiler.cc",
@ -1315,6 +1334,7 @@ v8_source_set("v8_base") {
"src/compiler/access-info.h",
"src/compiler/all-nodes.cc",
"src/compiler/all-nodes.h",
"src/compiler/allocation-builder.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
"src/compiler/branch-elimination.cc",
@ -1607,6 +1627,8 @@ v8_source_set("v8_base") {
"src/handles.cc",
"src/handles.h",
"src/heap-symbols.h",
"src/heap/array-buffer-collector.cc",
"src/heap/array-buffer-collector.h",
"src/heap/array-buffer-tracker-inl.h",
"src/heap/array-buffer-tracker.cc",
"src/heap/array-buffer-tracker.h",
@ -1658,14 +1680,11 @@ v8_source_set("v8_base") {
"src/heap/spaces.h",
"src/heap/store-buffer.cc",
"src/heap/store-buffer.h",
"src/heap/sweeper.cc",
"src/heap/sweeper.h",
"src/heap/worklist.h",
"src/ic/access-compiler-data.h",
"src/ic/access-compiler.cc",
"src/ic/access-compiler.h",
"src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
"src/ic/handler-compiler.cc",
"src/ic/handler-compiler.h",
"src/ic/handler-configuration-inl.h",
"src/ic/handler-configuration.cc",
"src/ic/handler-configuration.h",
@ -1773,9 +1792,10 @@ v8_source_set("v8_base") {
"src/objects.h",
"src/objects/arguments-inl.h",
"src/objects/arguments.h",
"src/objects/bigint-inl.h",
"src/objects/bigint.cc",
"src/objects/bigint.h",
"src/objects/code-inl.h",
"src/objects/code.h",
"src/objects/compilation-cache-inl.h",
"src/objects/compilation-cache.h",
"src/objects/debug-objects-inl.h",
@ -1789,6 +1809,11 @@ v8_source_set("v8_base") {
"src/objects/hash-table.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp.h",
"src/objects/literal-objects-inl.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
"src/objects/map-inl.h",
@ -1816,6 +1841,8 @@ v8_source_set("v8_base") {
"src/objects/template-objects.h",
"src/ostreams.cc",
"src/ostreams.h",
"src/parsing/background-parsing-task.cc",
"src/parsing/background-parsing-task.h",
"src/parsing/duplicate-finder.h",
"src/parsing/expression-classifier.h",
"src/parsing/expression-scope-reparenter.cc",
@ -1948,12 +1975,20 @@ v8_source_set("v8_base") {
"src/setup-isolate.h",
"src/signature.h",
"src/simulator.h",
"src/snapshot/builtin-deserializer-allocator.cc",
"src/snapshot/builtin-deserializer-allocator.h",
"src/snapshot/builtin-deserializer.cc",
"src/snapshot/builtin-deserializer.h",
"src/snapshot/builtin-serializer-allocator.cc",
"src/snapshot/builtin-serializer-allocator.h",
"src/snapshot/builtin-serializer.cc",
"src/snapshot/builtin-serializer.h",
"src/snapshot/builtin-snapshot-utils.cc",
"src/snapshot/builtin-snapshot-utils.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/default-deserializer-allocator.cc",
"src/snapshot/default-deserializer-allocator.h",
"src/snapshot/default-serializer-allocator.cc",
"src/snapshot/default-serializer-allocator.h",
"src/snapshot/deserializer.cc",
@ -2038,6 +2073,9 @@ v8_source_set("v8_base") {
"src/visitors.h",
"src/vm-state-inl.h",
"src/vm-state.h",
"src/wasm/baseline/liftoff-assembler.cc",
"src/wasm/baseline/liftoff-assembler.h",
"src/wasm/baseline/liftoff-compiler.cc",
"src/wasm/compilation-manager.cc",
"src/wasm/compilation-manager.h",
"src/wasm/decoder.h",
@ -2061,6 +2099,8 @@ v8_source_set("v8_base") {
"src/wasm/wasm-api.h",
"src/wasm/wasm-code-specialization.cc",
"src/wasm/wasm-code-specialization.h",
"src/wasm/wasm-code-wrapper.cc",
"src/wasm/wasm-code-wrapper.h",
"src/wasm/wasm-debug.cc",
"src/wasm/wasm-external-refs.cc",
"src/wasm/wasm-external-refs.h",
@ -2084,6 +2124,8 @@ v8_source_set("v8_base") {
"src/wasm/wasm-opcodes.h",
"src/wasm/wasm-result.cc",
"src/wasm/wasm-result.h",
"src/wasm/wasm-serialization.cc",
"src/wasm/wasm-serialization.h",
"src/wasm/wasm-text.cc",
"src/wasm/wasm-text.h",
"src/wasm/wasm-value.h",
@ -2128,9 +2170,7 @@ v8_source_set("v8_base") {
"src/ia32/assembler-ia32.cc",
"src/ia32/assembler-ia32.h",
"src/ia32/code-stubs-ia32.cc",
"src/ia32/code-stubs-ia32.h",
"src/ia32/codegen-ia32.cc",
"src/ia32/codegen-ia32.h",
"src/ia32/cpu-ia32.cc",
"src/ia32/deoptimizer-ia32.cc",
"src/ia32/disasm-ia32.cc",
@ -2142,10 +2182,10 @@ v8_source_set("v8_base") {
"src/ia32/simulator-ia32.cc",
"src/ia32/simulator-ia32.h",
"src/ia32/sse-instr.h",
"src/ic/ia32/access-compiler-ia32.cc",
"src/ic/ia32/handler-compiler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.cc",
"src/regexp/ia32/regexp-macro-assembler-ia32.h",
"src/wasm/baseline/ia32/liftoff-assembler-ia32-defs.h",
"src/wasm/baseline/ia32/liftoff-assembler-ia32.h",
]
} else if (v8_current_cpu == "x64") {
sources += [ ### gcmole(arch:x64) ###
@ -2156,18 +2196,16 @@ v8_source_set("v8_base") {
"src/compiler/x64/unwinding-info-writer-x64.cc",
"src/compiler/x64/unwinding-info-writer-x64.h",
"src/debug/x64/debug-x64.cc",
"src/ic/x64/access-compiler-x64.cc",
"src/ic/x64/handler-compiler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.cc",
"src/regexp/x64/regexp-macro-assembler-x64.h",
"src/third_party/valgrind/valgrind.h",
"src/wasm/baseline/x64/liftoff-assembler-x64-defs.h",
"src/wasm/baseline/x64/liftoff-assembler-x64.h",
"src/x64/assembler-x64-inl.h",
"src/x64/assembler-x64.cc",
"src/x64/assembler-x64.h",
"src/x64/code-stubs-x64.cc",
"src/x64/code-stubs-x64.h",
"src/x64/codegen-x64.cc",
"src/x64/codegen-x64.h",
"src/x64/cpu-x64.cc",
"src/x64/deoptimizer-x64.cc",
"src/x64/disasm-x64.cc",
@ -2192,7 +2230,6 @@ v8_source_set("v8_base") {
"src/arm/code-stubs-arm.cc",
"src/arm/code-stubs-arm.h",
"src/arm/codegen-arm.cc",
"src/arm/codegen-arm.h",
"src/arm/constants-arm.cc",
"src/arm/constants-arm.h",
"src/arm/cpu-arm.cc",
@ -2214,10 +2251,10 @@ v8_source_set("v8_base") {
"src/compiler/arm/unwinding-info-writer-arm.cc",
"src/compiler/arm/unwinding-info-writer-arm.h",
"src/debug/arm/debug-arm.cc",
"src/ic/arm/access-compiler-arm.cc",
"src/ic/arm/handler-compiler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.cc",
"src/regexp/arm/regexp-macro-assembler-arm.h",
"src/wasm/baseline/arm/liftoff-assembler-arm-defs.h",
"src/wasm/baseline/arm/liftoff-assembler-arm.h",
]
} else if (v8_current_cpu == "arm64") {
sources += [ ### gcmole(arch:arm64) ###
@ -2227,7 +2264,6 @@ v8_source_set("v8_base") {
"src/arm64/code-stubs-arm64.cc",
"src/arm64/code-stubs-arm64.h",
"src/arm64/codegen-arm64.cc",
"src/arm64/codegen-arm64.h",
"src/arm64/constants-arm64.h",
"src/arm64/cpu-arm64.cc",
"src/arm64/decoder-arm64-inl.h",
@ -2261,10 +2297,10 @@ v8_source_set("v8_base") {
"src/compiler/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/arm64/unwinding-info-writer-arm64.h",
"src/debug/arm64/debug-arm64.cc",
"src/ic/arm64/access-compiler-arm64.cc",
"src/ic/arm64/handler-compiler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.cc",
"src/regexp/arm64/regexp-macro-assembler-arm64.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64-defs.h",
"src/wasm/baseline/arm64/liftoff-assembler-arm64.h",
]
if (use_jumbo_build) {
jumbo_excluded_sources += [
@ -2280,15 +2316,12 @@ v8_source_set("v8_base") {
"src/compiler/mips/instruction-scheduler-mips.cc",
"src/compiler/mips/instruction-selector-mips.cc",
"src/debug/mips/debug-mips.cc",
"src/ic/mips/access-compiler-mips.cc",
"src/ic/mips/handler-compiler-mips.cc",
"src/mips/assembler-mips-inl.h",
"src/mips/assembler-mips.cc",
"src/mips/assembler-mips.h",
"src/mips/code-stubs-mips.cc",
"src/mips/code-stubs-mips.h",
"src/mips/codegen-mips.cc",
"src/mips/codegen-mips.h",
"src/mips/constants-mips.cc",
"src/mips/constants-mips.h",
"src/mips/cpu-mips.cc",
@ -2303,6 +2336,8 @@ v8_source_set("v8_base") {
"src/mips/simulator-mips.h",
"src/regexp/mips/regexp-macro-assembler-mips.cc",
"src/regexp/mips/regexp-macro-assembler-mips.h",
"src/wasm/baseline/mips/liftoff-assembler-mips-defs.h",
"src/wasm/baseline/mips/liftoff-assembler-mips.h",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
sources += [ ### gcmole(arch:mips64el) ###
@ -2311,15 +2346,12 @@ v8_source_set("v8_base") {
"src/compiler/mips64/instruction-scheduler-mips64.cc",
"src/compiler/mips64/instruction-selector-mips64.cc",
"src/debug/mips64/debug-mips64.cc",
"src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc",
"src/mips64/assembler-mips64-inl.h",
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
"src/mips64/code-stubs-mips64.cc",
"src/mips64/code-stubs-mips64.h",
"src/mips64/codegen-mips64.cc",
"src/mips64/codegen-mips64.h",
"src/mips64/constants-mips64.cc",
"src/mips64/constants-mips64.h",
"src/mips64/cpu-mips64.cc",
@ -2334,6 +2366,8 @@ v8_source_set("v8_base") {
"src/mips64/simulator-mips64.h",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64-defs.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
} else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") {
sources += [ ### gcmole(arch:ppc) ###
@ -2342,15 +2376,12 @@ v8_source_set("v8_base") {
"src/compiler/ppc/instruction-scheduler-ppc.cc",
"src/compiler/ppc/instruction-selector-ppc.cc",
"src/debug/ppc/debug-ppc.cc",
"src/ic/ppc/access-compiler-ppc.cc",
"src/ic/ppc/handler-compiler-ppc.cc",
"src/ppc/assembler-ppc-inl.h",
"src/ppc/assembler-ppc.cc",
"src/ppc/assembler-ppc.h",
"src/ppc/code-stubs-ppc.cc",
"src/ppc/code-stubs-ppc.h",
"src/ppc/codegen-ppc.cc",
"src/ppc/codegen-ppc.h",
"src/ppc/constants-ppc.cc",
"src/ppc/constants-ppc.h",
"src/ppc/cpu-ppc.cc",
@ -2365,6 +2396,8 @@ v8_source_set("v8_base") {
"src/ppc/simulator-ppc.h",
"src/regexp/ppc/regexp-macro-assembler-ppc.cc",
"src/regexp/ppc/regexp-macro-assembler-ppc.h",
"src/wasm/baseline/ppc/liftoff-assembler-ppc-defs.h",
"src/wasm/baseline/ppc/liftoff-assembler-ppc.h",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
sources += [ ### gcmole(arch:s390) ###
@ -2373,8 +2406,6 @@ v8_source_set("v8_base") {
"src/compiler/s390/instruction-scheduler-s390.cc",
"src/compiler/s390/instruction-selector-s390.cc",
"src/debug/s390/debug-s390.cc",
"src/ic/s390/access-compiler-s390.cc",
"src/ic/s390/handler-compiler-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.cc",
"src/regexp/s390/regexp-macro-assembler-s390.h",
"src/s390/assembler-s390-inl.h",
@ -2383,7 +2414,6 @@ v8_source_set("v8_base") {
"src/s390/code-stubs-s390.cc",
"src/s390/code-stubs-s390.h",
"src/s390/codegen-s390.cc",
"src/s390/codegen-s390.h",
"src/s390/constants-s390.cc",
"src/s390/constants-s390.h",
"src/s390/cpu-s390.cc",
@ -2396,6 +2426,8 @@ v8_source_set("v8_base") {
"src/s390/macro-assembler-s390.h",
"src/s390/simulator-s390.cc",
"src/s390/simulator-s390.h",
"src/wasm/baseline/s390/liftoff-assembler-s390-defs.h",
"src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
}
@ -2597,6 +2629,10 @@ v8_component("v8_libplatform") {
"include/libplatform/libplatform-export.h",
"include/libplatform/libplatform.h",
"include/libplatform/v8-tracing.h",
"src/libplatform/default-background-task-runner.cc",
"src/libplatform/default-background-task-runner.h",
"src/libplatform/default-foreground-task-runner.cc",
"src/libplatform/default-foreground-task-runner.h",
"src/libplatform/default-platform.cc",
"src/libplatform/default-platform.h",
"src/libplatform/task-queue.cc",
@ -2660,11 +2696,35 @@ v8_source_set("fuzzer_support") {
]
}
###############################################################################
# Produce a single static library for embedders
#
if (v8_monolithic) {
# A component build is not monolithic.
assert(!is_component_build)
# Using external startup data would produce separate files.
assert(!v8_use_external_startup_data)
v8_static_library("v8_monolith") {
deps = [
":v8",
":v8_libbase",
":v8_libplatform",
":v8_libsampler",
"//build/config:exe_and_shlib_deps",
"//build/win:default_exe_manifest",
]
configs = [ ":internal_config" ]
}
}
###############################################################################
# Executables
#
if (current_toolchain == v8_snapshot_toolchain) {
if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) {
v8_executable("mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@ -2719,6 +2779,8 @@ group("gn_all") {
}
group("v8_clusterfuzz") {
testonly = true
deps = [
":d8",
]
@ -2731,6 +2793,13 @@ group("v8_clusterfuzz") {
":d8(//build/toolchain/linux:clang_x86_v8_arm)",
]
}
if (v8_test_isolation_mode != "noop") {
deps += [
"tools:run-deopt-fuzzer_run",
"tools:run-num-fuzzer_run",
]
}
}
group("v8_archive") {

deps/v8/ChangeLog vendored

File diff suppressed because it is too large.

deps/v8/DEPS vendored

@ -3,28 +3,33 @@
# all paths in here must match this assumption.
vars = {
'checkout_instrumented_libraries': False,
'chromium_url': 'https://chromium.googlesource.com',
}
deps = {
'v8/build':
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'adaf9e56105b814105e2d49bc4fa63e2cd4795f5',
Var('chromium_url') + '/chromium/src/build.git' + '@' + '9338ce52d0b9bcef34c38285fbd5023b62739fac',
'v8/tools/gyp':
Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
'v8/third_party/icu':
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '21d33b1a09a77f033478ea4ffffb61e6970f83bd',
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '741688ebf328da9adc52505248bf4e2ef868722c',
'v8/third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '644afd349826cb68204226a16c38bde13abe9c3c',
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '28417458ac4dc79f68915079d0f283f682504cc0',
'v8/buildtools':
Var('chromium_url') + '/chromium/buildtools.git' + '@' + 'f6d165d9d842ddd29056c127a5f3a3c5d8e0d2e3',
Var('chromium_url') + '/chromium/buildtools.git' + '@' + '505de88083136eefd056e5ee4ca0f01fe9b33de8',
'v8/base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'abcc4153b783b5e2c2dafcfbf658017ecb56989a',
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '0e9a47d74970bee1bbfc063c47215406f8918699',
'v8/third_party/android_tools': {
'url': Var('chromium_url') + '/android_tools.git' + '@' + 'ca9dc7245b888c75307f0619e4a39fb46a82de66',
'url': Var('chromium_url') + '/android_tools.git' + '@' + 'a2e9bc7c1b41d983577907df51d339fb1e0fd02f',
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
'url': Var('chromium_url') + '/catapult.git' + '@' + 'a48a6afde0ff7eeb1c847744192977e412107d6a',
'url': Var('chromium_url') + '/catapult.git' + '@' + '11d7efb857ae77eff1cea4640e3f3d9ac49cba0a',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
'url': Var('chromium_url') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8',
'condition': 'checkout_android',
},
'v8/third_party/jinja2':
@ -32,7 +37,7 @@ deps = {
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/tools/swarming_client':
Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '5e8001d9a710121ce7a68efd0804430a34b4f9e4',
Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '4bd9152f8a975d57c972c071dfb4ddf668e02200',
'v8/testing/gtest':
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '6f8a66431cb592dad629028a50b3dd418a408c87',
'v8/testing/gmock':
@ -42,15 +47,15 @@ deps = {
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '290799bbeeba86245a355894b6ff2bb33d946d9e',
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '5d4c667b271a9b39d0de73aef5ffe6879c6f8811',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
'v8/tools/clang':
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'b3169f97cc1a9daa1a9fbae15752588079792098',
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '8688d267571de76a56746324dcc249bf4232b85a',
'v8/tools/luci-go':
Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '9f54aa9fe06499b6bac378ae1f045be2158cf2cc',
Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '45a8a51fda92e123619a69e7644d9c64a320b0c1',
'v8/test/wasm-js':
Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '89573ee3eabc690637deeb1b8dadec13a963ec30',
Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'a7e226a92e660a3d5413cfea4269824f513259d2',
}
recursedeps = [
@ -248,15 +253,26 @@ hooks = [
],
},
{
# Pull sanitizer-instrumented third-party libraries if requested via
# GYP_DEFINES.
'name': 'instrumented_libraries',
'pattern': '\\.sha1',
# TODO(machenbach): Insert condition and remove GYP_DEFINES dependency.
'action': [
'python',
'v8/third_party/instrumented_libraries/scripts/download_binaries.py',
],
'name': 'msan_chained_origins',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
'-s', 'v8/third_party/instrumented_libraries/binaries/msan-chained-origins-trusty.tgz.sha1',
],
},
{
'name': 'msan_no_origins',
'pattern': '.',
'condition': 'checkout_instrumented_libraries',
'action': [ 'download_from_google_storage',
'--no_resume',
'--no_auth',
'--bucket', 'chromium-instrumented-libraries',
'-s', 'v8/third_party/instrumented_libraries/binaries/msan-no-origins-trusty.tgz.sha1',
],
},
{
# Update the Windows toolchain if necessary.
@ -283,9 +299,30 @@ hooks = [
'pattern': '.',
'action': ['python', 'v8/tools/clang/scripts/update.py'],
},
{
'name': 'fuchsia_sdk',
'pattern': '.',
'condition': 'checkout_fuchsia',
'action': [
'python',
'v8/build/fuchsia/update_sdk.py',
'226f6dd0cad1d6be63a353ce2649423470729ae9',
],
},
{
# A change to a .gyp, .gypi, or to GYP itself should run the generator.
'name': 'regyp_if_needed',
'pattern': '.',
'action': ['python', 'v8/gypfiles/gyp_v8', '--running-as-hook'],
},
# Download and initialize "vpython" VirtualEnv environment packages.
{
'name': 'vpython_common',
'pattern': '.',
'condition': 'checkout_android',
'action': [ 'vpython',
'-vpython-spec', 'v8/.vpython',
'-vpython-tool', 'install',
],
},
]

deps/v8/OWNERS vendored

@ -27,10 +27,11 @@ mstarzinger@chromium.org
mtrofin@chromium.org
mvstanton@chromium.org
mythria@chromium.org
petermarshall@chromium.org
neis@chromium.org
petermarshall@chromium.org
rmcilroy@chromium.org
rossberg@chromium.org
sergiyb@chromium.org
tebbi@chromium.org
titzer@chromium.org
ulan@chromium.org


@ -281,6 +281,8 @@ def _CommonChecks(input_api, output_api):
results.extend(_CheckMissingFiles(input_api, output_api))
results.extend(_CheckJSONFiles(input_api, output_api))
results.extend(_CheckMacroUndefs(input_api, output_api))
results.extend(input_api.RunTests(
input_api.canned_checks.CheckVPythonSpec(input_api, output_api)))
return results


@ -189,6 +189,8 @@
// trace points would carry a significant performance cost of acquiring a lock
// and resolving the category.
// Check that nobody includes this file directly. Clients are supposed to
// include the surrounding "trace_event.h" of their project instead.
#if defined(TRACE_EVENT0)
#error "Another copy of this file has already been included."
#endif


@ -106,6 +106,11 @@ template("v8_isolate_run") {
} else {
use_external_startup_data = "0"
}
if (is_ubsan_vptr) {
ubsan_vptr = "1"
} else {
ubsan_vptr = "0"
}
if (v8_use_snapshot) {
use_snapshot = "true"
} else {
@ -168,6 +173,8 @@ template("v8_isolate_run") {
"--config-variable",
"target_arch=$target_arch",
"--config-variable",
"ubsan_vptr=$ubsan_vptr",
"--config-variable",
"v8_use_external_startup_data=$use_external_startup_data",
"--config-variable",
"v8_use_snapshot=$use_snapshot",

deps/v8/gni/v8.gni vendored

@ -174,3 +174,13 @@ template("v8_component") {
configs += v8_add_configs
}
}
template("v8_static_library") {
static_library(target_name) {
complete_static_lib = true
forward_variables_from(invoker, "*", [ "configs" ])
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs
}
}


@ -46,7 +46,7 @@
'../tools/gcmole/run_gcmole.gyp:*',
'../tools/jsfunfuzz/jsfunfuzz.gyp:*',
'../tools/run-deopt-fuzzer.gyp:*',
'../tools/run-valgrind.gyp:*',
'../tools/run-num-fuzzer.gyp:*',
],
}],
]


@ -85,7 +85,7 @@
'v8_check_microtasks_scopes_consistency%': 'false',
# Enable concurrent marking.
'v8_enable_concurrent_marking%': 0,
'v8_enable_concurrent_marking%': 1,
# Controls the threshold for on-heap/off-heap Typed Arrays.
'v8_typed_array_max_size_in_heap%': 64,


@ -80,6 +80,7 @@
'--config-variable', 'sanitizer_coverage=<(sanitizer_coverage)',
'--config-variable', 'component=<(component)',
'--config-variable', 'target_arch=<(target_arch)',
'--config-variable', 'ubsan_vptr=0',
'--config-variable', 'v8_use_external_startup_data=<(v8_use_external_startup_data)',
'--config-variable', 'v8_use_snapshot=<(v8_use_snapshot)',
],


@ -439,6 +439,7 @@
'-Wno-undefined-var-template',
# TODO(yangguo): issue 5258
'-Wno-nonportable-include-path',
'-Wno-tautological-constant-compare',
],
'conditions':[
['OS=="android"', {
@ -783,6 +784,11 @@
# over the place.
'-fno-strict-aliasing',
],
}, {
'cflags' : [
# TODO(hans): https://crbug.com/767059
'-Wno-tautological-constant-compare',
],
}],
[ 'clang==1 and (v8_target_arch=="x64" or v8_target_arch=="arm64" \
or v8_target_arch=="mips64el")', {


@ -8,6 +8,7 @@
#include "libplatform/libplatform-export.h"
#include "libplatform/v8-tracing.h"
#include "v8-platform.h" // NOLINT(build/include)
#include "v8config.h" // NOLINT(build/include)
namespace v8 {
namespace platform {
@ -33,12 +34,21 @@ enum class MessageLoopBehavior : bool {
* If |tracing_controller| is nullptr, the default platform will create a
* v8::platform::TracingController instance and use it.
*/
V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
V8_PLATFORM_EXPORT std::unique_ptr<v8::Platform> NewDefaultPlatform(
int thread_pool_size = 0,
IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
InProcessStackDumping in_process_stack_dumping =
InProcessStackDumping::kEnabled,
v8::TracingController* tracing_controller = nullptr);
std::unique_ptr<v8::TracingController> tracing_controller = {});
V8_PLATFORM_EXPORT V8_DEPRECATE_SOON(
"Use NewDefaultPlatform instead",
v8::Platform* CreateDefaultPlatform(
int thread_pool_size = 0,
IdleTaskSupport idle_task_support = IdleTaskSupport::kDisabled,
InProcessStackDumping in_process_stack_dumping =
InProcessStackDumping::kEnabled,
v8::TracingController* tracing_controller = nullptr));
/**
* Pumps the message loop for the given isolate.
@ -46,7 +56,7 @@ V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
* The caller has to make sure that this is called from the right thread.
* Returns true if a task was executed, and false otherwise. Unless requested
* through the |behavior| parameter, this call does not block if no task is
* pending. The |platform| has to be created using |CreateDefaultPlatform|.
* pending. The |platform| has to be created using |NewDefaultPlatform|.
*/
V8_PLATFORM_EXPORT bool PumpMessageLoop(
v8::Platform* platform, v8::Isolate* isolate,
@ -60,7 +70,7 @@ V8_PLATFORM_EXPORT void EnsureEventLoopInitialized(v8::Platform* platform,
*
* The caller has to make sure that this is called from the right thread.
* This call does not block if no task is pending. The |platform| has to be
* created using |CreateDefaultPlatform|.
* created using |NewDefaultPlatform|.
*/
V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform,
v8::Isolate* isolate,
@ -69,13 +79,14 @@ V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform,
/**
* Attempts to set the tracing controller for the given platform.
*
* The |platform| has to be created using |CreateDefaultPlatform|.
* The |platform| has to be created using |NewDefaultPlatform|.
*
* DEPRECATED: Will be removed soon.
*/
V8_PLATFORM_EXPORT void SetTracingController(
v8::Platform* platform,
v8::platform::tracing::TracingController* tracing_controller);
V8_PLATFORM_EXPORT V8_DEPRECATE_SOON(
"Access the DefaultPlatform directly",
void SetTracingController(
v8::Platform* platform,
v8::platform::tracing::TracingController* tracing_controller));
} // namespace platform
} // namespace v8
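
For embedders migrating off the deprecated CreateDefaultPlatform, a minimal initialization sketch against the new NewDefaultPlatform entry point (error handling and script execution omitted; names other than the V8 API are illustrative):

#include <memory>
#include "libplatform/libplatform.h"
#include "v8.h"

int main() {
  // The platform is now owned through std::unique_ptr.
  std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
  v8::V8::InitializePlatform(platform.get());
  v8::V8::Initialize();

  v8::Isolate::CreateParams params;
  params.array_buffer_allocator =
      v8::ArrayBuffer::Allocator::NewDefaultAllocator();
  v8::Isolate* isolate = v8::Isolate::New(params);

  // ... create a Context, compile and run scripts ...

  // Drain foreground tasks scheduled through the platform.
  while (v8::platform::PumpMessageLoop(platform.get(), isolate)) {
  }

  isolate->Dispose();
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  delete params.array_buffer_allocator;
  return 0;
}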


@ -43,8 +43,8 @@ class V8_PLATFORM_EXPORT TraceObject {
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags, int64_t timestamp, int64_t cpu_timestamp);
void UpdateDuration(int64_t timestamp, int64_t cpu_timestamp);
unsigned int flags);
void UpdateDuration();
void InitializeForTesting(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int num_args,
@ -247,13 +247,6 @@ class V8_PLATFORM_EXPORT TracingController
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags) override;
uint64_t AddTraceEventWithTimestamp(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values,
std::unique_ptr<v8::ConvertableToTraceFormat>* arg_convertables,
unsigned int flags, int64_t timestamp) override;
void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
const char* name, uint64_t handle) override;
void AddTraceStateObserver(
@ -266,10 +259,6 @@ class V8_PLATFORM_EXPORT TracingController
static const char* GetCategoryGroupName(const uint8_t* category_enabled_flag);
protected:
virtual int64_t CurrentTimestampMicroseconds();
virtual int64_t CurrentCpuTimestampMicroseconds();
private:
const uint8_t* GetCategoryGroupEnabledInternal(const char* category_group);
void UpdateCategoryGroupEnabledFlag(size_t category_index);


@ -215,6 +215,20 @@ class V8_EXPORT V8InspectorClient {
virtual void maxAsyncCallStackDepthChanged(int depth) {}
};
// These stack trace ids are intended to be passed between debuggers and be
// resolved later. This allows tracking cross-debugger calls and stepping
// between them if a single client connects to multiple debuggers.
struct V8_EXPORT V8StackTraceId {
uintptr_t id;
std::pair<int64_t, int64_t> debugger_id;
V8StackTraceId();
V8StackTraceId(uintptr_t id, const std::pair<int64_t, int64_t> debugger_id);
~V8StackTraceId() = default;
bool IsInvalid() const;
};
class V8_EXPORT V8Inspector {
public:
static std::unique_ptr<V8Inspector> create(v8::Isolate*, V8InspectorClient*);
@ -237,6 +251,11 @@ class V8_EXPORT V8Inspector {
virtual void asyncTaskFinished(void* task) = 0;
virtual void allAsyncTasksCanceled() = 0;
virtual V8StackTraceId storeCurrentStackTrace(
const StringView& description) = 0;
virtual void externalAsyncTaskStarted(const V8StackTraceId& parent) = 0;
virtual void externalAsyncTaskFinished(const V8StackTraceId& parent) = 0;
// Exceptions instrumentation.
virtual unsigned exceptionThrown(
v8::Local<v8::Context>, const StringView& message,
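
As a sketch of the new external async task API: the side that schedules work captures a V8StackTraceId, and whichever debugger later runs the task brackets it so a client connected to both can stitch the async stacks together (the inspector pointers, description StringView, and RunScheduledTask are assumed embedder plumbing):

// Capture the current stack before handing the task off.
v8_inspector::V8StackTraceId parent =
    inspector->storeCurrentStackTrace(description);  // description: StringView

// Later, possibly on a different isolate attached to a different V8Inspector:
other_inspector->externalAsyncTaskStarted(parent);
RunScheduledTask();  // assumed embedder work
other_inspector->externalAsyncTaskFinished(parent);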


@ -119,11 +119,11 @@ class TracingController {
}
/**
* Adds a trace event to the platform tracing system. These function calls are
* Adds a trace event to the platform tracing system. This function call is
* usually the result of a TRACE_* macro from trace_event_common.h when
* tracing and the category of the particular trace are enabled. It is not
* advisable to call these functions on their own; they are really only meant
* to be used by the trace macros. The returned handle can be used by
* advisable to call this function on its own; it is really only meant to be
* used by the trace macros. The returned handle can be used by
* UpdateTraceEventDuration to update the duration of COMPLETE events.
*/
virtual uint64_t AddTraceEvent(
@ -135,15 +135,6 @@ class TracingController {
unsigned int flags) {
return 0;
}
virtual uint64_t AddTraceEventWithTimestamp(
char phase, const uint8_t* category_enabled_flag, const char* name,
const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
const char** arg_names, const uint8_t* arg_types,
const uint64_t* arg_values,
std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
unsigned int flags, int64_t timestamp) {
return 0;
}
/**
* Sets the duration field of a COMPLETE trace event. It must be called with


@ -286,6 +286,13 @@ class V8_EXPORT CpuProfiler {
*/
static CpuProfiler* New(Isolate* isolate);
/**
* Synchronously collect current stack sample in all profilers attached to
* the |isolate|. The call does not affect number of ticks recorded for
* the current top node.
*/
static void CollectSample(Isolate* isolate);
/**
* Disposes the CPU profiler object.
*/
@ -322,7 +329,8 @@ class V8_EXPORT CpuProfiler {
* Recording the forced sample does not contribute to the aggregated
* profile statistics.
*/
void CollectSample();
V8_DEPRECATED("Use static CollectSample(Isolate*) instead.",
void CollectSample());
/**
* Tells the profiler whether the embedder is idle.

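The migration implied by this deprecation is small; a sketch:

// Before: required a CpuProfiler instance.
// profiler->CollectSample();

// After: one static call samples every profiler attached to the isolate.
v8::CpuProfiler::CollectSample(isolate);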

@ -393,9 +393,14 @@ class PersistentValueMap : public PersistentValueMapBase<K, V, Traits> {
*/
Global<V> SetUnique(const K& key, Global<V>* persistent) {
if (Traits::kCallbackType != kNotWeak) {
WeakCallbackType callback_type =
Traits::kCallbackType == kWeakWithInternalFields
? WeakCallbackType::kInternalFields
: WeakCallbackType::kParameter;
Local<V> value(Local<V>::New(this->isolate(), *persistent));
persistent->template SetWeak<typename Traits::WeakCallbackDataType>(
Traits::WeakCallbackParameter(this, key, value), WeakCallback);
Traits::WeakCallbackParameter(this, key, value), WeakCallback,
callback_type);
}
PersistentContainerValue old_value =
Traits::Set(this->impl(), key, this->ClearAndLeak(persistent));


@ -29,9 +29,10 @@
"." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) "." V8_S( \
V8_PATCH_LEVEL) V8_EMBEDDER_STRING V8_CANDIDATE_STRING
#else
#define V8_VERSION_STRING \
V8_S(V8_MAJOR_VERSION) \
"." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) V8_CANDIDATE_STRING
#define V8_VERSION_STRING \
V8_S(V8_MAJOR_VERSION) \
"." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) \
V8_EMBEDDER_STRING V8_CANDIDATE_STRING
#endif
#endif // V8_VERSION_STRING_H_


@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 6
#define V8_MINOR_VERSION 3
#define V8_BUILD_NUMBER 292
#define V8_PATCH_LEVEL 48
#define V8_MINOR_VERSION 4
#define V8_BUILD_NUMBER 388
#define V8_PATCH_LEVEL 40
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
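
With the macro values above and the V8_VERSION_STRING rules shown earlier (the patch level is non-zero, so the branch that includes it applies), the reported version becomes "6.4.388.40", matching this update. A quick check, assuming the include/ headers are on the include path:

#include <cstdio>
#include "v8-version.h"
#include "v8-version-string.h"

int main() {
  std::printf("%s\n", V8_VERSION_STRING);  // "6.4.388.40" plus any embedder suffix
  return 0;
}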

deps/v8/include/v8.h vendored

@ -433,20 +433,6 @@ class WeakCallbackInfo {
V8_INLINE T* GetParameter() const { return parameter_; }
V8_INLINE void* GetInternalField(int index) const;
V8_INLINE V8_DEPRECATED("use indexed version",
void* GetInternalField1() const) {
return embedder_fields_[0];
}
V8_INLINE V8_DEPRECATED("use indexed version",
void* GetInternalField2() const) {
return embedder_fields_[1];
}
V8_DEPRECATED("Not realiable once SetSecondPassCallback() was used.",
bool IsFirstPass() const) {
return callback_ != nullptr;
}
// When first called, the embedder MUST Reset() the Global which triggered the
// callback. The Global itself is unusable for anything else. No v8 other api
// calls may be called in the first callback. Should additional work be
@ -579,16 +565,22 @@ template <class T> class PersistentBase {
* independent handle should not assume that it will be preceded by a global
* GC prologue callback or followed by a global GC epilogue callback.
*/
V8_INLINE void MarkIndependent();
V8_DEPRECATE_SOON(
"Objects are always considered independent. "
"Use MarkActive to avoid collecting otherwise dead weak handles.",
V8_INLINE void MarkIndependent());
/**
* Marks the reference to this object as active. The scavenge garbage
* collection should not reclaim the objects marked as active.
* collection should not reclaim the objects marked as active, even if the
* object held by the handle is otherwise unreachable.
*
* This bit is cleared after each garbage collection pass.
*/
V8_INLINE void MarkActive();
V8_INLINE bool IsIndependent() const;
V8_DEPRECATE_SOON("See MarkIndependent.",
V8_INLINE bool IsIndependent() const);
/** Checks if the handle holds the only reference to an object. */
V8_INLINE bool IsNearDeath() const;
@ -984,9 +976,6 @@ class V8_EXPORT Data {
};
/**
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes. DO NOT USE.
*
* A container type that holds relevant metadata for module loading.
*
* This is passed back to the embedder as part of
@ -1008,9 +997,6 @@ class V8_EXPORT ScriptOrModule {
};
/**
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes. DO NOT USE.
*
* An array to hold Primitive values. This is used by the embedder to
* pass host defined options to the ScriptOptions during compilation.
*
@ -1440,6 +1426,26 @@ class V8_EXPORT ScriptCompiler {
kConsumeCodeCache
};
/**
* The reason for which we are not requesting or providing a code cache.
*/
enum NoCacheReason {
kNoCacheNoReason = 0,
kNoCacheBecauseCachingDisabled,
kNoCacheBecauseNoResource,
kNoCacheBecauseInlineScript,
kNoCacheBecauseModule,
kNoCacheBecauseStreamingSource,
kNoCacheBecauseInspector,
kNoCacheBecauseScriptTooSmall,
kNoCacheBecauseCacheTooCold,
kNoCacheBecauseV8Extension,
kNoCacheBecauseExtensionModule,
kNoCacheBecausePacScript,
kNoCacheBecauseInDocumentWrite,
kNoCacheBecauseResourceWithNoCacheHandler
};
/**
* Compiles the specified script (context-independent).
* Cached data as part of the source object can be optionally produced to be
@ -1456,10 +1462,12 @@ class V8_EXPORT ScriptCompiler {
static V8_DEPRECATED("Use maybe version",
Local<UnboundScript> CompileUnbound(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions));
CompileOptions options = kNoCompileOptions,
NoCacheReason no_cache_reason = kNoCacheNoReason));
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundScript(
Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions);
CompileOptions options = kNoCompileOptions,
NoCacheReason no_cache_reason = kNoCacheNoReason);
/**
* Compiles the specified script (bound to current context).
@ -1475,10 +1483,12 @@ class V8_EXPORT ScriptCompiler {
static V8_DEPRECATED(
"Use maybe version",
Local<Script> Compile(Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions));
CompileOptions options = kNoCompileOptions,
NoCacheReason no_cache_reason = kNoCacheNoReason));
static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
Local<Context> context, Source* source,
CompileOptions options = kNoCompileOptions);
CompileOptions options = kNoCompileOptions,
NoCacheReason no_cache_reason = kNoCacheNoReason);
/**
* Returns a task which streams script data into V8, or NULL if the script
@ -1568,7 +1578,8 @@ class V8_EXPORT ScriptCompiler {
private:
static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundInternal(
Isolate* isolate, Source* source, CompileOptions options);
Isolate* isolate, Source* source, CompileOptions options,
NoCacheReason no_cache_reason);
};
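
A hedged sketch of the new parameter in use (context and source_text are assumed to exist; the reason value is just an example):

v8::ScriptCompiler::Source source(source_text);
v8::MaybeLocal<v8::Script> script = v8::ScriptCompiler::Compile(
    context, &source, v8::ScriptCompiler::kNoCompileOptions,
    v8::ScriptCompiler::kNoCacheBecauseInlineScript);
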
@ -2070,20 +2081,6 @@ class V8_EXPORT ValueDeserializer {
PrivateData* private_;
};
/**
* A map whose keys are referenced weakly. It is similar to JavaScript WeakMap
* but can be created without entering a v8::Context and hence shouldn't
* escape to JavaScript.
*/
class V8_EXPORT NativeWeakMap : public Data {
public:
static Local<NativeWeakMap> New(Isolate* isolate);
void Set(Local<Value> key, Local<Value> value);
Local<Value> Get(Local<Value> key) const;
bool Has(Local<Value> key);
bool Delete(Local<Value> key);
};
// --- Value ---
@ -3709,8 +3706,6 @@ class FunctionCallbackInfo {
V8_INLINE int Length() const;
/** Accessor for the available arguments. */
V8_INLINE Local<Value> operator[](int i) const;
V8_INLINE V8_DEPRECATED("Use Data() to explicitly pass Callee instead",
Local<Function> Callee() const);
/** Returns the receiver. This corresponds to the "this" value. */
V8_INLINE Local<Object> This() const;
/**
@ -3735,7 +3730,7 @@ class FunctionCallbackInfo {
/** The ReturnValue for the call. */
V8_INLINE ReturnValue<T> GetReturnValue() const;
// This shouldn't be public, but the arm compiler needs it.
static const int kArgsLength = 8;
static const int kArgsLength = 6;
protected:
friend class internal::FunctionCallbackArguments;
@ -3746,9 +3741,7 @@ class FunctionCallbackInfo {
static const int kReturnValueDefaultValueIndex = 2;
static const int kReturnValueIndex = 3;
static const int kDataIndex = 4;
static const int kCalleeIndex = 5;
static const int kContextSaveIndex = 6;
static const int kNewTargetIndex = 7;
static const int kNewTargetIndex = 5;
V8_INLINE FunctionCallbackInfo(internal::Object** implicit_args,
internal::Object** values, int length);
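
With Callee() removed, anything a callback used to recover from it is now passed explicitly through the data argument; a minimal sketch:

void MyCallback(const v8::FunctionCallbackInfo<v8::Value>& info) {
  v8::Local<v8::Value> data = info.Data();  // whatever was attached at creation
  // ... use |data| where Callee() was used before ...
}

// At setup time (illustrative):
// v8::Local<v8::Function> fn =
//     v8::Function::New(context, MyCallback, data_value).ToLocalChecked();
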
@ -5262,7 +5255,7 @@ typedef void (*GenericNamedPropertySetterCallback)(
* defineProperty().
*
* Use `info.GetReturnValue().Set(value)` to set the property attributes. The
* value is an interger encoding a `v8::PropertyAttribute`.
* value is an integer encoding a `v8::PropertyAttribute`.
*
* \param property The name of the property for which the request was
* intercepted.
@ -5986,7 +5979,7 @@ class V8_EXPORT ObjectTemplate : public Template {
bool IsImmutableProto();
/**
* Makes the ObjectTempate for an immutable prototype exotic object, with an
* Makes the ObjectTemplate for an immutable prototype exotic object, with an
* immutable __proto__.
*/
void SetImmutableProto();
@ -6291,6 +6284,20 @@ typedef MaybeLocal<Promise> (*HostImportModuleDynamicallyCallback)(
Local<Context> context, Local<ScriptOrModule> referrer,
Local<String> specifier);
/**
* HostInitializeImportMetaObjectCallback is called the first time import.meta
* is accessed for a module. Subsequent access will reuse the same value.
*
* The method combines two implementation-defined abstract operations into one:
* HostGetImportMetaProperties and HostFinalizeImportMeta.
*
* The embedder should use v8::Object::CreateDataProperty to add properties on
* the meta object.
*/
typedef void (*HostInitializeImportMetaObjectCallback)(Local<Context> context,
Local<Module> module,
Local<Object> meta);
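
For illustration, a callback of this shape can populate import.meta with a "url" property via CreateDataProperty, as the comment above suggests (LookupModuleUrl stands in for the embedder's own module bookkeeping):

void OnInitializeImportMeta(v8::Local<v8::Context> context,
                            v8::Local<v8::Module> module,
                            v8::Local<v8::Object> meta) {
  v8::Isolate* isolate = context->GetIsolate();
  v8::Local<v8::String> url =
      LookupModuleUrl(isolate, module);  // assumed embedder helper
  meta->CreateDataProperty(
          context,
          v8::String::NewFromUtf8(isolate, "url", v8::NewStringType::kNormal)
              .ToLocalChecked(),
          url)
      .FromJust();
}

// Registered via Isolate::SetHostInitializeImportMetaObjectCallback (added
// further down in this diff):
// isolate->SetHostInitializeImportMetaObjectCallback(OnInitializeImportMeta);
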
/**
* PromiseHook with type kInit is called when a new promise is
* created. When a new promise is created as part of the chain in the
@ -6418,6 +6425,9 @@ typedef bool (*AllowCodeGenerationFromStringsCallback)(Local<Context> context,
// --- WebAssembly compilation callbacks ---
typedef bool (*ExtensionCallback)(const FunctionCallbackInfo<Value>&);
typedef bool (*AllowWasmCodeGenerationCallback)(Local<Context> context,
Local<String> source);
// --- Callback for APIs defined on v8-supported objects, but implemented
// by the embedder. Example: WebAssembly.{compile|instantiate}Streaming ---
typedef void (*ApiImplementationCallback)(const FunctionCallbackInfo<Value>&);
@ -7049,9 +7059,15 @@ class V8_EXPORT Isolate {
kConstructorNonUndefinedPrimitiveReturn = 39,
kLabeledExpressionStatement = 40,
kLineOrParagraphSeparatorAsLineTerminator = 41,
kIndexAccessor = 42,
kErrorCaptureStackTrace = 43,
kErrorPrepareStackTrace = 44,
kErrorStackTraceLimit = 45,
kWebAssemblyInstantiation = 46,
// If you add new values here, you'll also need to update Chromium's:
// UseCounter.h, V8PerIsolateData.cpp, histograms.xml
// web_feature.mojom, UseCounterCallback.cpp, and enums.xml. V8 changes to
// this list need to be landed first, then changes on the Chromium side.
kUseCounterFeatureCount // This enum value must be last.
};
@ -7102,15 +7118,22 @@ class V8_EXPORT Isolate {
AbortOnUncaughtExceptionCallback callback);
/**
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes. DO NOT USE.
*
* This specifies the callback called by the upcoming dynamic
* import() language feature to load modules.
*/
void SetHostImportModuleDynamicallyCallback(
HostImportModuleDynamicallyCallback callback);
/**
* This is an unfinished experimental feature, and is only exposed
* here for internal testing purposes. DO NOT USE.
*
* This specifies the callback called by the upcoming import.meta
* language feature to retrieve host-defined meta data for a module.
*/
void SetHostInitializeImportMetaObjectCallback(
HostInitializeImportMetaObjectCallback callback);
/**
* Optional notification that the system is running low on memory.
* V8 uses these notifications to guide heuristics.
@ -7275,8 +7298,8 @@ class V8_EXPORT Isolate {
* is initialized. It is the embedder's responsibility to stop all CPU
* profiling activities if it has started any.
*/
V8_DEPRECATE_SOON("CpuProfiler should be created with CpuProfiler::New call.",
CpuProfiler* GetCpuProfiler());
V8_DEPRECATED("CpuProfiler should be created with CpuProfiler::New call.",
CpuProfiler* GetCpuProfiler());
/** Returns true if this isolate has a current context. */
bool InContext();
@ -7702,6 +7725,13 @@ class V8_EXPORT Isolate {
void SetAllowCodeGenerationFromStringsCallback(
AllowCodeGenerationFromStringsCallback callback);
/**
* Set the callback to invoke to check if wasm code generation should
* be allowed.
*/
void SetAllowWasmCodeGenerationCallback(
AllowWasmCodeGenerationCallback callback);
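
A sketch of wiring up this callback (EmbedderTrustsContext stands in for whatever policy the embedder applies):

bool AllowWasmCodeGeneration(v8::Local<v8::Context> context,
                             v8::Local<v8::String> source) {
  return EmbedderTrustsContext(context);  // assumed embedder policy helper
}

// isolate->SetAllowWasmCodeGenerationCallback(AllowWasmCodeGeneration);
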
/**
* Embedder over{ride|load} injection points for wasm APIs. The expectation
* is that the embedder sets them at most once.
@ -7957,50 +7987,6 @@ class V8_EXPORT V8 {
"Use isolate version",
void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback));
/**
* Enables the host application to receive a notification before a
* garbage collection. Allocations are not allowed in the
* callback function, you therefore cannot manipulate objects (set
* or delete properties for example) since it is possible such
* operations will result in the allocation of objects. It is possible
* to specify the GCType filter for your callback. But it is not possible to
* register the same callback function two times with different
* GCType filters.
*/
static V8_DEPRECATED(
"Use isolate version",
void AddGCPrologueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
/**
* This function removes callback which was installed by
* AddGCPrologueCallback function.
*/
static V8_DEPRECATED("Use isolate version",
void RemoveGCPrologueCallback(GCCallback callback));
/**
* Enables the host application to receive a notification after a
* garbage collection. Allocations are not allowed in the
* callback function, you therefore cannot manipulate objects (set
* or delete properties for example) since it is possible such
* operations will result in the allocation of objects. It is possible
* to specify the GCType filter for your callback. But it is not possible to
* register the same callback function two times with different
* GCType filters.
*/
static V8_DEPRECATED(
"Use isolate version",
void AddGCEpilogueCallback(GCCallback callback,
GCType gc_type_filter = kGCTypeAll));
/**
* This function removes callback which was installed by
* AddGCEpilogueCallback function.
*/
static V8_DEPRECATED("Use isolate version",
void RemoveGCEpilogueCallback(GCCallback callback));
/**
* Initializes V8. This function needs to be called before the first Isolate
* is created. It always returns true.
@ -8085,35 +8071,6 @@ class V8_EXPORT V8 {
"Use isolate version",
void VisitExternalResources(ExternalResourceVisitor* visitor));
/**
* Iterates through all the persistent handles in the current isolate's heap
* that have class_ids.
*/
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor));
/**
* Iterates through all the persistent handles in isolate's heap that have
* class_ids.
*/
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesWithClassIds(Isolate* isolate,
PersistentHandleVisitor* visitor));
/**
* Iterates through all the persistent handles in the current isolate's heap
* that have class_ids and are candidates to be marked as partially dependent
* handles. This will visit handles to young objects created since the last
* garbage collection but is free to visit an arbitrary superset of these
* objects.
*/
V8_INLINE static V8_DEPRECATED(
"Use isolate version",
void VisitHandlesForPartialDependence(Isolate* isolate,
PersistentHandleVisitor* visitor));
/**
* Initialize the ICU library bundled with V8. The embedder should only
* invoke this method when using the bundled ICU. Returns true on success.
@ -8383,18 +8340,45 @@ class Maybe {
friend Maybe<U> Just(const U& u);
};
template <class T>
inline Maybe<T> Nothing() {
return Maybe<T>();
}
template <class T>
inline Maybe<T> Just(const T& t) {
return Maybe<T>(t);
}
// A template specialization of Maybe<T> for the case of T = void.
template <>
class Maybe<void> {
public:
V8_INLINE bool IsNothing() const { return !is_valid_; }
V8_INLINE bool IsJust() const { return is_valid_; }
V8_INLINE bool operator==(const Maybe& other) const {
return IsJust() == other.IsJust();
}
V8_INLINE bool operator!=(const Maybe& other) const {
return !operator==(other);
}
private:
struct JustTag {};
Maybe() : is_valid_(false) {}
explicit Maybe(JustTag) : is_valid_(true) {}
bool is_valid_;
template <class U>
friend Maybe<U> Nothing();
friend Maybe<void> JustVoid();
};
inline Maybe<void> JustVoid() { return Maybe<void>(Maybe<void>::JustTag()); }
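A short usage sketch for the new specialization (the Validate helper is hypothetical): Maybe&lt;void&gt; models an operation that may fail but produces no value, with JustVoid() as the success case:
```
#include "v8.h"

// Hypothetical operation that can fail but has no result value.
v8::Maybe<void> Validate(int value) {
  if (value < 0) return v8::Nothing<void>();
  return v8::JustVoid();
}

bool Example() {
  return Validate(42).IsJust();  // true: the operation succeeded.
}
```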
/**
* An external exception handler.
@ -8803,7 +8787,7 @@ class V8_EXPORT Context {
* stack.
* https://html.spec.whatwg.org/multipage/webappapis.html#backup-incumbent-settings-object-stack
*/
class BackupIncumbentScope {
class V8_EXPORT BackupIncumbentScope {
public:
/**
* |backup_incumbent_context| is pushed onto the backup incumbent settings
@ -9047,8 +9031,7 @@ class Internals {
// These values match non-compiler-dependent values defined within
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
static const int kMapInstanceTypeAndBitFieldOffset =
1 * kApiPointerSize + kApiIntSize;
static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset = 3 * kApiPointerSize;
static const int kOddballKindOffset = 4 * kApiPointerSize + sizeof(double);
@ -9084,14 +9067,14 @@ class Internals {
static const int kNodeStateIsWeakValue = 2;
static const int kNodeStateIsPendingValue = 3;
static const int kNodeStateIsNearDeathValue = 4;
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsActiveShift = 4;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
static const int kJSApiObjectType = 0xbf;
static const int kJSObjectType = 0xc0;
static const int kJSSpecialApiObjectType = 0xbc;
static const int kJSApiObjectType = 0xc0;
static const int kJSObjectType = 0xc1;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
@ -9125,9 +9108,7 @@ class Internals {
V8_INLINE static int GetInstanceType(const internal::Object* obj) {
typedef internal::Object O;
O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
// Map::InstanceType is defined so that it will always be loaded into
// the LS 8 bits of one 16-bit word, regardless of endianess.
return ReadField<uint16_t>(map, kMapInstanceTypeAndBitFieldOffset) & 0xff;
return ReadField<uint16_t>(map, kMapInstanceTypeOffset);
}
V8_INLINE static int GetOddballKind(const internal::Object* obj) {
@ -9284,16 +9265,11 @@ void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
M::Copy(that, this);
}
template <class T>
bool PersistentBase<T>::IsIndependent() const {
typedef internal::Internals I;
if (this->IsEmpty()) return false;
return I::GetNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
I::kNodeIsIndependentShift);
return true;
}
template <class T>
bool PersistentBase<T>::IsNearDeath() const {
typedef internal::Internals I;
@ -9374,13 +9350,7 @@ void PersistentBase<T>::RegisterExternalReference(Isolate* isolate) const {
}
template <class T>
void PersistentBase<T>::MarkIndependent() {
typedef internal::Internals I;
if (this->IsEmpty()) return;
I::UpdateNodeFlag(reinterpret_cast<internal::Object**>(this->val_),
true,
I::kNodeIsIndependentShift);
}
void PersistentBase<T>::MarkIndependent() {}
template <class T>
void PersistentBase<T>::MarkActive() {
@ -9550,13 +9520,6 @@ Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
}
template<typename T>
Local<Function> FunctionCallbackInfo<T>::Callee() const {
return Local<Function>(reinterpret_cast<Function*>(
&implicit_args_[kCalleeIndex]));
}
template<typename T>
Local<Object> FunctionCallbackInfo<T>::This() const {
return Local<Object>(reinterpret_cast<Object*>(values_ + 1));
@ -9697,7 +9660,8 @@ Local<Value> Object::GetInternalField(int index) {
// know where to find the internal fields and can return the value directly.
auto instance_type = I::GetInstanceType(obj);
if (instance_type == I::kJSObjectType ||
instance_type == I::kJSApiObjectType) {
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType) {
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
O* value = I::ReadField<O*>(obj, offset);
O** result = HandleScope::CreateHandle(reinterpret_cast<HO*>(obj), value);
@ -9717,7 +9681,8 @@ void* Object::GetAlignedPointerFromInternalField(int index) {
// know where to find the internal fields and can return the value directly.
auto instance_type = I::GetInstanceType(obj);
if (V8_LIKELY(instance_type == I::kJSObjectType ||
instance_type == I::kJSApiObjectType)) {
instance_type == I::kJSApiObjectType ||
instance_type == I::kJSSpecialApiObjectType)) {
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
return I::ReadField<void*>(obj, offset);
}
@ -10404,24 +10369,6 @@ void V8::VisitExternalResources(ExternalResourceVisitor* visitor) {
isolate->VisitExternalResources(visitor);
}
void V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) {
Isolate* isolate = Isolate::GetCurrent();
isolate->VisitHandlesWithClassIds(visitor);
}
void V8::VisitHandlesWithClassIds(Isolate* isolate,
PersistentHandleVisitor* visitor) {
isolate->VisitHandlesWithClassIds(visitor);
}
void V8::VisitHandlesForPartialDependence(Isolate* isolate,
PersistentHandleVisitor* visitor) {
isolate->VisitHandlesForPartialDependence(visitor);
}
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the


@ -9,16 +9,8 @@ commit_burst_delay: 60
max_commit_burst: 1
gerrit {}
rietveld {
url: "https://codereview.chromium.org"
}
verifiers {
reviewer_lgtm {
committer_list: "project-v8-committers"
dry_run_access_list: "project-v8-tryjob-access"
}
gerrit_cq_ability {
committer_list: "project-v8-committers"
dry_run_access_list: "project-v8-tryjob-access"
@ -30,20 +22,26 @@ verifiers {
try_job {
buckets {
name: "master.tryserver.v8"
name: "luci.v8.try"
builders { name: "v8_android_arm_compile_rel" }
builders { name: "v8_fuchsia_rel_ng" }
builders { name: "v8_linux64_gcc_compile_dbg" }
builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_shared_compile_rel" }
builders { name: "v8_presubmit" }
builders {
name: "v8_win64_msvc_compile_rel"
experiment_percentage: 20
}
}
buckets {
name: "master.tryserver.v8"
builders { name: "v8_node_linux64_rel" }
builders { name: "v8_linux64_asan_rel_ng" }
builders {
name: "v8_linux64_asan_rel_ng_triggered"
triggered_by: "v8_linux64_asan_rel_ng"
}
builders { name: "v8_linux64_avx2_rel_ng" }
builders {
name: "v8_linux64_avx2_rel_ng_triggered"
triggered_by: "v8_linux64_avx2_rel_ng"
}
builders { name: "v8_linux64_gcc_compile_dbg" }
builders { name: "v8_linux64_gyp_rel_ng" }
builders {
name: "v8_linux64_gyp_rel_ng_triggered"
@ -75,7 +73,6 @@ verifiers {
name: "v8_linux_dbg_ng_triggered"
triggered_by: "v8_linux_dbg_ng"
}
builders { name: "v8_linux_gcc_compile_rel" }
builders { name: "v8_linux_mipsel_compile_rel" }
builders { name: "v8_linux_mips64el_compile_rel" }
builders { name: "v8_linux_nodcheck_rel_ng" }
@ -98,7 +95,6 @@ verifiers {
name: "v8_mac_rel_ng_triggered"
triggered_by: "v8_mac_rel_ng"
}
builders { name: "v8_presubmit" }
builders { name: "v8_win64_rel_ng" }
builders {
name: "v8_win64_rel_ng_triggered"


@ -62,9 +62,9 @@
'V8 Linux - noi18n - debug': 'gn_debug_x86_no_i18n',
'V8 Linux - verify csa': 'gn_release_x86_verify_csa',
# Linux64.
'V8 Linux64 - builder': 'gn_release_x64_valgrind',
'V8 Linux64 - builder': 'gn_release_x64',
'V8 Linux64 - concurrent marking - builder': 'gn_release_x64_concurrent_marking',
'V8 Linux64 - debug builder': 'gn_debug_x64_valgrind',
'V8 Linux64 - debug builder': 'gn_debug_x64',
'V8 Linux64 - custom snapshot - debug builder': 'gn_debug_x64_custom',
'V8 Linux64 - internal snapshot': 'gn_release_x64_internal',
'V8 Linux64 - gyp': 'gyp_release_x64',
@ -74,11 +74,10 @@
'V8 Win32 - debug builder': 'gn_debug_x86_minimal_symbols',
'V8 Win32 - nosnap - shared':
'gn_release_x86_no_snap_shared_minimal_symbols',
'V8 Win32 ASAN': 'gn_release_x86_asan_no_lsan',
'V8 Win64': 'gn_release_x64_minimal_symbols',
'V8 Win64 - debug': 'gn_debug_x64_minimal_symbols',
# TODO(machenbach): Switch plugins on when errors are fixed.
'V8 Win64 - clang': 'gn_release_x64_clang',
'V8 Win64 ASAN': 'gn_release_x64_asan_no_lsan',
'V8 Win64 - msvc': 'gn_release_x64_msvc',
# Mac.
'V8 Mac': 'gn_release_x86',
'V8 Mac - debug': 'gn_debug_x86',
@ -96,6 +95,8 @@
'V8 Linux gcc 4.8': 'gn_release_x86_gcc',
'V8 Linux64 gcc 4.8 - debug': 'gn_debug_x64_gcc',
# FYI.
'V8 Fuchsia': 'gn_release_x64_fuchsia',
'V8 Fuchsia - debug': 'gn_debug_x64_fuchsia',
'V8 Linux - swarming staging': 'gn_release_x64',
'V8 Linux64 - cfi': 'gn_release_x64_cfi',
'V8 Linux64 UBSanVptr': 'gn_release_x64_ubsan_vptr',
@ -107,6 +108,13 @@
'V8 Random Deopt Fuzzer - debug': 'gn_debug_x64',
},
'client.v8.clusterfuzz': {
'V8 Win32 ASAN - release builder':
'gn_release_x86_asan_no_lsan_verify_heap',
# Note this is called a debug builder, but it uses a release build
# configuration with dchecks (which enables DEBUG in V8), since win-asan
# debug is not supported.
'V8 Win32 ASAN - debug builder':
'gn_release_x86_asan_no_lsan_verify_heap_dchecks',
'V8 Mac64 ASAN - release builder':
'gn_release_x64_asan_no_lsan_edge_verify_heap',
'V8 Mac64 ASAN - debug builder':
@ -127,7 +135,7 @@
'gn_release_simulate_arm64_msan_no_origins_edge',
'V8 Linux MSAN chained origins':
'gn_release_simulate_arm64_msan_edge',
'V8 Linux64 UBSan - release builder': 'gn_release_x64_ubsan_recover',
'V8 Linux64 TSAN - release builder': 'gn_release_x64_tsan',
'V8 Linux64 UBSanVptr - release builder':
'gn_release_x64_ubsan_vptr_recover_edge',
},
@ -150,11 +158,11 @@
'V8 Linux - mipsel - sim - builder': 'gn_release_simulate_mipsel',
'V8 Linux - mips64el - sim - builder': 'gn_release_simulate_mips64el',
# PPC.
'V8 Linux - ppc - sim': 'gyp_release_simulate_ppc',
'V8 Linux - ppc64 - sim': 'gyp_release_simulate_ppc64',
'V8 Linux - ppc - sim': 'gn_release_simulate_ppc',
'V8 Linux - ppc64 - sim': 'gn_release_simulate_ppc64',
# S390.
'V8 Linux - s390 - sim': 'gyp_release_simulate_s390',
'V8 Linux - s390x - sim': 'gyp_release_simulate_s390x',
'V8 Linux - s390 - sim': 'gn_release_simulate_s390',
'V8 Linux - s390x - sim': 'gn_release_simulate_s390x',
},
'client.v8.branches': {
'V8 Linux - beta branch': 'gn_release_x86',
@ -173,19 +181,19 @@
'V8 mips64el - sim - stable branch': 'gn_release_simulate_mips64el',
'V8 mipsel - sim - beta branch': 'gn_release_simulate_mipsel',
'V8 mipsel - sim - stable branch': 'gn_release_simulate_mipsel',
'V8 ppc - sim - beta branch': 'gyp_release_simulate_ppc',
'V8 ppc - sim - stable branch': 'gyp_release_simulate_ppc',
'V8 ppc64 - sim - beta branch': 'gyp_release_simulate_ppc64',
'V8 ppc64 - sim - stable branch': 'gyp_release_simulate_ppc64',
'V8 s390 - sim - beta branch': 'gyp_release_simulate_s390',
'V8 s390 - sim - stable branch': 'gyp_release_simulate_s390',
'V8 s390x - sim - beta branch': 'gyp_release_simulate_s390x',
'V8 s390x - sim - stable branch': 'gyp_release_simulate_s390x',
'V8 ppc - sim - beta branch': 'gn_release_simulate_ppc',
'V8 ppc - sim - stable branch': 'gn_release_simulate_ppc',
'V8 ppc64 - sim - beta branch': 'gn_release_simulate_ppc64',
'V8 ppc64 - sim - stable branch': 'gn_release_simulate_ppc64',
'V8 s390 - sim - beta branch': 'gn_release_simulate_s390',
'V8 s390 - sim - stable branch': 'gn_release_simulate_s390',
'V8 s390x - sim - beta branch': 'gn_release_simulate_s390x',
'V8 s390x - sim - stable branch': 'gn_release_simulate_s390x',
},
'tryserver.v8': {
'v8_fuchsia_rel_ng': 'gn_release_x64_fuchsia_trybot',
'v8_linux_rel_ng': 'gn_release_x86_gcmole_trybot',
'v8_linux_verify_csa_rel_ng': 'gn_release_x86_verify_csa',
'v8_linux_avx2_dbg': 'gn_debug_x86_trybot',
'v8_linux_nodcheck_rel_ng': 'gn_release_x86_minimal_symbols',
'v8_linux_dbg_ng': 'gn_debug_x86_trybot',
'v8_linux_noi18n_rel_ng': 'gn_release_x86_no_i18n_trybot',
@ -194,12 +202,11 @@
'v8_linux_nosnap_dbg': 'gn_debug_x86_no_snap_trybot',
'v8_linux_gcc_compile_rel': 'gn_release_x86_gcc_minimal_symbols',
'v8_linux_gcc_rel': 'gn_release_x86_gcc_minimal_symbols',
'v8_linux_shared_compile_rel': 'gn_release_x86_shared_verify_heap',
'v8_linux64_gcc_compile_dbg': 'gn_debug_x64_gcc',
'v8_linux64_rel_ng': 'gn_release_x64_valgrind_trybot',
'v8_linux64_rel_ng': 'gn_release_x64_trybot',
'v8_linux64_verify_csa_rel_ng': 'gn_release_x64_verify_csa',
'v8_linux64_gyp_rel_ng': 'gyp_release_x64',
'v8_linux64_avx2_rel_ng': 'gn_release_x64_trybot',
'v8_linux64_avx2_dbg': 'gn_debug_x64_trybot',
'v8_linux64_asan_rel_ng': 'gn_release_x64_asan_minimal_symbols',
'v8_linux64_msan_rel': 'gn_release_simulate_arm64_msan_minimal_symbols',
'v8_linux64_sanitizer_coverage_rel':
@ -208,12 +215,14 @@
'v8_linux64_tsan_concurrent_marking_rel_ng':
'gn_release_x64_tsan_concurrent_marking_minimal_symbols',
'v8_linux64_ubsan_rel_ng': 'gn_release_x64_ubsan_vptr_minimal_symbols',
'v8_win_asan_rel_ng': 'gn_release_x86_asan_no_lsan',
'v8_win_dbg': 'gn_debug_x86_trybot',
'v8_win_compile_dbg': 'gn_debug_x86_trybot',
'v8_win_rel_ng': 'gn_release_x86_trybot',
'v8_win_nosnap_shared_rel_ng':
'gn_release_x86_no_snap_shared_minimal_symbols',
'v8_win64_asan_rel_ng': 'gn_release_x64_asan_no_lsan',
# TODO(machenbach): Rename bot to msvc.
'v8_win64_msvc_compile_rel': 'gn_release_x64_msvc',
'v8_win64_dbg': 'gn_debug_x64_minimal_symbols',
'v8_win64_rel_ng': 'gn_release_x64_trybot',
'v8_mac_rel_ng': 'gn_release_x86_trybot',
@ -355,11 +364,18 @@
'gn', 'release_bot', 'simulate_mipsel', 'swarming'],
'gn_release_simulate_mips64el': [
'gn', 'release_bot', 'simulate_mips64el', 'swarming'],
'gn_release_simulate_ppc': [
'gn', 'release_bot', 'simulate_ppc', 'swarming'],
'gn_release_simulate_ppc64': [
'gn', 'release_bot', 'simulate_ppc64', 'swarming'],
'gn_release_simulate_s390': [
'gn', 'release_bot', 'simulate_s390', 'swarming'],
'gn_release_simulate_s390x': [
'gn', 'release_bot', 'simulate_s390x', 'swarming'],
# GN debug configs for arm.
'gn_debug_arm': [
'gn', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming',
'no_custom_libcxx'],
'gn', 'debug_bot', 'arm', 'crosscompile', 'hard_float', 'swarming'],
# GN release configs for arm.
'gn_release_arm': [
@ -394,12 +410,16 @@
'gn', 'release_bot', 'x64', 'cfi', 'swarming'],
'gn_release_x64_cfi_clusterfuzz': [
'gn', 'release_bot', 'x64', 'cfi_clusterfuzz'],
'gn_release_x64_clang': [
'gn', 'release_bot', 'x64', 'clang', 'swarming'],
'gn_release_x64_msvc': [
'gn', 'release_bot', 'x64', 'msvc', 'swarming'],
'gn_release_x64_concurrent_marking': [
'gn', 'release_bot', 'x64', 'v8_enable_concurrent_marking', 'swarming'],
'gn_release_x64_correctness_fuzzer' : [
'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer'],
'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer', 'swarming'],
'gn_release_x64_fuchsia': [
'gn', 'release_bot', 'x64', 'fuchsia', 'swarming'],
'gn_release_x64_fuchsia_trybot': [
'gn', 'release_trybot', 'x64', 'fuchsia', 'swarming'],
'gn_release_x64_gcc_coverage': [
'gn', 'release_bot', 'x64', 'coverage', 'gcc'],
'gn_release_x64_internal': [
@ -418,20 +438,12 @@
'minimal_symbols', 'swarming'],
'gn_release_x64_tsan_minimal_symbols': [
'gn', 'release_bot', 'x64', 'tsan', 'minimal_symbols', 'swarming'],
'gn_release_x64_ubsan_recover': [
'gn', 'release_bot', 'x64', 'ubsan_recover', 'swarming'],
'gn_release_x64_ubsan_vptr': [
'gn', 'release_bot', 'x64', 'ubsan_vptr', 'swarming'],
'gn_release_x64_ubsan_vptr_recover_edge': [
'gn', 'release_bot', 'x64', 'edge', 'ubsan_vptr_recover', 'swarming'],
'gn_release_x64_ubsan_vptr_minimal_symbols': [
'gn', 'release_bot', 'x64', 'ubsan_vptr', 'minimal_symbols', 'swarming'],
'gn_release_x64_valgrind': [
'gn', 'release_bot', 'x64', 'swarming', 'valgrind',
'no_custom_libcxx'],
'gn_release_x64_valgrind_trybot': [
'gn', 'release_trybot', 'x64', 'swarming', 'valgrind',
'no_custom_libcxx'],
'gn_release_x64_verify_csa': [
'gn', 'release_bot', 'x64', 'swarming', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_verify_csa'],
@ -446,15 +458,12 @@
'v8_optimized_debug', 'x64', 'asan', 'edge', 'swarming'],
'gn_debug_x64_custom': [
'gn', 'debug_bot', 'x64', 'swarming', 'v8_snapshot_custom'],
'gn_debug_x64_fuchsia': [
'gn', 'debug_bot', 'x64', 'fuchsia', 'swarming'],
'gn_debug_x64_gcc': [
'gn', 'debug_bot', 'x64', 'gcc'],
'gn_debug_x64_minimal_symbols': [
'gn', 'debug_bot', 'x64', 'minimal_symbols', 'swarming'],
'gn_debug_x64_trybot': [
'gn', 'debug_trybot', 'x64', 'swarming'],
'gn_debug_x64_valgrind': [
'gn', 'debug_bot', 'x64', 'swarming', 'valgrind',
'no_custom_libcxx'],
# GN debug configs for x86.
'gn_debug_x86': [
@ -478,6 +487,14 @@
# GN release configs for x86.
'gn_release_x86': [
'gn', 'release_bot', 'x86', 'swarming'],
'gn_release_x86_asan_no_lsan': [
'gn', 'release_bot', 'x86', 'asan', 'clang', 'swarming'],
'gn_release_x86_asan_no_lsan_verify_heap': [
'gn', 'release_bot', 'x86', 'asan', 'clang', 'swarming',
'v8_verify_heap'],
'gn_release_x86_asan_no_lsan_verify_heap_dchecks': [
'gn', 'release_bot', 'x86', 'asan', 'clang', 'swarming',
'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_heap'],
'gn_release_x86_disassembler': [
'gn', 'release_bot', 'x86', 'v8_enable_disassembler'],
'gn_release_x86_gcc': [
@ -513,22 +530,12 @@
# Gyp release configs for mips.
'gyp_release_mips_no_snap_no_i18n': [
'gyp', 'release', 'mips', 'crosscompile', 'static', 'v8_no_i18n',
'v8_snapshot_none'],
# Gyp release configs for simulators.
'gyp_release_simulate_ppc': [
'gyp', 'release_bot', 'simulate_ppc', 'swarming'],
'gyp_release_simulate_ppc64': [
'gyp', 'release_bot', 'simulate_ppc64', 'swarming'],
'gyp_release_simulate_s390': [
'gyp', 'release_bot', 'simulate_s390', 'swarming'],
'gyp_release_simulate_s390x': [
'gyp', 'release_bot', 'simulate_s390x', 'swarming'],
'gyp', 'release', 'mips', 'crosscompile', 'no_sysroot', 'static',
'v8_no_i18n', 'v8_snapshot_none'],
# Gyp release configs for x64.
'gyp_release_x64': [
'gyp', 'release_bot', 'x64', 'swarming'],
'gyp', 'release_bot', 'x64', 'no_sysroot', 'swarming'],
},
'mixins': {
@ -608,6 +615,10 @@
'gyp_defines': 'sanitizer_coverage=trace-pc-guard',
},
'fuchsia': {
'gn_args': 'target_os="fuchsia"',
},
'gcc': {
# TODO(machenbach): Remove cxx11 restriction when updating gcc version.
'gn_args': 'is_clang=false use_cxx11=true',
@ -660,9 +671,13 @@
'use_prebuilt_instrumented_libraries=true'),
},
# TODO(machenbach): Remove when http://crbug.com/738814 is resolved.
'no_custom_libcxx': {
'gn_args': 'use_custom_libcxx=false',
'msvc': {
'gn_args': 'is_clang=false',
},
'no_sysroot': {
'gn_args': 'use_sysroot=false',
'gyp_defines': 'use_sysroot=0',
},
'release': {
@ -745,11 +760,6 @@
'gyp_defines': 'clang=1 tsan=1',
},
'ubsan_recover': {
# Ubsan with recovery.
'gn_args': 'is_ubsan=true is_ubsan_no_recover=false',
},
'ubsan_vptr': {
# TODO(krasin): Remove is_ubsan_no_recover=true when
# https://llvm.org/bugs/show_bug.cgi?id=25569 is fixed and just use
@ -762,11 +772,6 @@
'gn_args': 'is_ubsan_vptr=true is_ubsan_no_recover=false',
},
'valgrind': {
'gn_args': 'v8_has_valgrind=true',
'gyp_defines': 'has_valgrind=1',
},
'v8_no_i18n': {
'gn_args': 'v8_enable_i18n_support=false icu_use_data_file=false',
'gyp_defines': 'v8_enable_i18n_support=0 icu_use_data_file_flag=0',

4
deps/v8/infra/testing/OWNERS vendored Normal file

@ -0,0 +1,4 @@
set noparent
machenbach@chromium.org
sergiyb@chromium.org

50
deps/v8/infra/testing/README.md vendored Normal file

@ -0,0 +1,50 @@
# Src-side test specifications
The infra/testing folder in V8 contains test specifications, consumed and
executed by the continuous infrastructure. Every master has an optional file
named `<mastername>.pyl`. E.g. `tryserver.v8.pyl`.
The structure of each file is:
```
{
<buildername>: [
{
'name': <test-spec name>,
'variant': <variant name>,
'shards': <number of shards>,
},
...
],
...
}
```
`<buildername>` is the string name of the builder that executes the tests.
`<test-spec name>` is a label defining a test specification matching the
[infra-side](https://chromium.googlesource.com/chromium/tools/build/+/master/scripts/slave/recipe_modules/v8/testing.py#58).
The `<variant name>` is a testing variant as specified in
`v8/tools/testrunner/local/variants.py`. `<number of shards>` is optional
(default 1), but can be provided to increase the swarming shards for
long-running tests.
Example:
```
{
'v8_linux64_rel_ng_triggered': [
{'name': 'v8testing', 'variant': 'nooptimization', 'shards': 2},
],
}
```
## Guidelines
Please keep trybots and continuous bots in sync, e.g. add the same configuration
for the release and debug CI bots and the corresponding trybot (where
applicable). For example:
```
tryserver.v8:
v8_linux64_rel_ng_triggered
client.v8:
V8 Linux64
V8 Linux64 - debug
```

13
deps/v8/infra/testing/client.v8.pyl vendored Normal file

@ -0,0 +1,13 @@
# Copyright 2017 The V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
### Example configuration for CI bots (please keep as reference).
# 'V8 Linux64': [
# {'name': 'benchmarks', 'variant': 'default', 'shards': 1},
# ],
# 'V8 Linux64 - debug': [
# {'name': 'benchmarks', 'variant': 'default', 'shards': 1},
# ],
}

10
deps/v8/infra/testing/tryserver.v8.pyl vendored Normal file

@ -0,0 +1,10 @@
# Copyright 2017 The V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
### Example configuration for trybots (please keep as reference).
# 'v8_linux64_rel_ng_triggered': [
# {'name': 'benchmarks', 'variant': 'default', 'shards': 1},
# ],
}


@ -13,8 +13,8 @@ int main(int argc, char* argv[]) {
// Initialize V8.
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
// Create a new Isolate and make it the current one.
@ -56,7 +56,6 @@ int main(int argc, char* argv[]) {
isolate->Dispose();
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete platform;
delete create_params.array_buffer_allocator;
return 0;
}


@ -701,8 +701,8 @@ void PrintMap(map<string, string>* m) {
int main(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
map<string, string> options;
string file;
@ -728,7 +728,7 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "Error initializing processor.\n");
return 1;
}
if (!ProcessEntries(platform, &processor, kSampleSize, kSampleRequests))
if (!ProcessEntries(platform.get(), &processor, kSampleSize, kSampleRequests))
return 1;
PrintMap(&output);
}


@ -66,8 +66,8 @@ static bool run_shell;
int main(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
v8::V8::InitializeExternalStartupData(argv[0]);
v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
v8::V8::Initialize();
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::Isolate::CreateParams create_params;
@ -85,13 +85,12 @@ int main(int argc, char* argv[]) {
return 1;
}
v8::Context::Scope context_scope(context);
result = RunMain(isolate, platform, argc, argv);
if (run_shell) RunShell(context, platform);
result = RunMain(isolate, platform.get(), argc, argv);
if (run_shell) RunShell(context, platform.get());
}
isolate->Dispose();
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
delete platform;
delete create_params.array_buffer_allocator;
return result;
}


@ -63,6 +63,12 @@ if (v8_snapshot_toolchain == "") {
} else if (current_os == "win" && host_os == "mac" && is_clang) {
# This is a mac -> win cross-compile, which is only supported w/ clang.
v8_snapshot_toolchain = "//build/toolchain/mac:clang_${v8_current_cpu}"
} else if (host_cpu == "x64" &&
(v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
# We don't support snapshot generation for big-endian targets;
# therefore snapshots will need to be built using native mksnapshot
# in combination with qemu.
v8_snapshot_toolchain = current_toolchain
} else if (host_cpu == "x64") {
# This is a cross-compile from an x64 host to either a non-Intel target
# cpu or a different target OS. Clang will always be used by default on the
@ -76,11 +82,9 @@ if (v8_snapshot_toolchain == "") {
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
_cpus = v8_current_cpu
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "mips64") {
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") {
_cpus = "x64_v8_${v8_current_cpu}"
} else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel" ||
v8_current_cpu == "mips") {
} else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel") {
_cpus = "x86_v8_${v8_current_cpu}"
} else {
# This branch should not be reached; leave _cpus blank so the assert

3
deps/v8/src/OWNERS vendored

@ -3,5 +3,8 @@ per-file intl.*=mnita@google.com
per-file intl.*=jshin@chromium.org
per-file typing-asm.*=aseemgarg@chromium.org
per-file typing-asm.*=bradnelson@chromium.org
per-file objects-body-descriptors*=hpayer@chromium.org
per-file objects-body-descriptors*=mlippautz@chromium.org
per-file objects-body-descriptors*=ulan@chromium.org
# COMPONENT: Blink>JavaScript


@ -20,10 +20,9 @@ namespace internal {
Handle<AccessorInfo> Accessors::MakeAccessor(
Isolate* isolate, Handle<Name> name, AccessorNameGetterCallback getter,
AccessorNameBooleanSetterCallback setter, PropertyAttributes attributes) {
AccessorNameBooleanSetterCallback setter) {
Factory* factory = isolate->factory();
Handle<AccessorInfo> info = factory->NewAccessorInfo();
info->set_property_attributes(attributes);
info->set_all_can_read(false);
info->set_all_can_write(false);
info->set_is_special_data_property(true);
@ -44,13 +43,12 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
return info;
}
static V8_INLINE bool CheckForName(Handle<Name> name,
Handle<String> property_name,
int offset,
int* object_offset) {
Handle<String> property_name, int offset,
FieldIndex::Encoding encoding,
FieldIndex* index) {
if (Name::Equals(name, property_name)) {
*object_offset = offset;
*index = FieldIndex::ForInObjectOffset(offset, encoding);
return true;
}
return false;
@ -60,18 +58,17 @@ static V8_INLINE bool CheckForName(Handle<Name> name,
// Returns true for properties that are accessors to object fields.
// If true, the matching FieldIndex is returned through |index|.
bool Accessors::IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
int* object_offset) {
FieldIndex* index) {
Isolate* isolate = name->GetIsolate();
switch (map->instance_type()) {
case JS_ARRAY_TYPE:
return
CheckForName(name, isolate->factory()->length_string(),
JSArray::kLengthOffset, object_offset);
return CheckForName(name, isolate->factory()->length_string(),
JSArray::kLengthOffset, FieldIndex::kTagged, index);
default:
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return CheckForName(name, isolate->factory()->length_string(),
String::kLengthOffset, object_offset);
String::kLengthOffset, FieldIndex::kTagged, index);
}
return false;
@ -135,12 +132,9 @@ void Accessors::ArgumentsIteratorGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeArgumentsIteratorInfo(Isolate* isolate) {
Handle<Name> name = isolate->factory()->iterator_symbol();
return MakeAccessor(isolate, name, &ArgumentsIteratorGetter, nullptr,
attributes);
return MakeAccessor(isolate, name, &ArgumentsIteratorGetter, nullptr);
}
@ -220,14 +214,9 @@ void Accessors::ArrayLengthSetter(
}
}
Handle<AccessorInfo> Accessors::ArrayLengthInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate,
isolate->factory()->length_string(),
&ArrayLengthGetter,
&ArrayLengthSetter,
attributes);
Handle<AccessorInfo> Accessors::MakeArrayLengthInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->length_string(),
&ArrayLengthGetter, &ArrayLengthSetter);
}
//
@ -268,10 +257,10 @@ void Accessors::ModuleNamespaceEntrySetter(
}
}
Handle<AccessorInfo> Accessors::ModuleNamespaceEntryInfo(
Isolate* isolate, Handle<String> name, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeModuleNamespaceEntryInfo(
Isolate* isolate, Handle<String> name) {
return MakeAccessor(isolate, name, &ModuleNamespaceEntryGetter,
&ModuleNamespaceEntrySetter, attributes);
&ModuleNamespaceEntrySetter);
}
@ -302,11 +291,9 @@ void Accessors::StringLengthGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
Handle<AccessorInfo> Accessors::StringLengthInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeStringLengthInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->length_string(),
&StringLengthGetter, nullptr, attributes);
&StringLengthGetter, nullptr);
}
@ -327,13 +314,10 @@ void Accessors::ScriptColumnOffsetGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptColumnOffsetInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("column_offset")));
return MakeAccessor(isolate, name, &ScriptColumnOffsetGetter, nullptr,
attributes);
return MakeAccessor(isolate, name, &ScriptColumnOffsetGetter, nullptr);
}
@ -353,12 +337,10 @@ void Accessors::ScriptIdGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(id, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptIdInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptIdInfo(Isolate* isolate) {
Handle<String> name(
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("id")));
return MakeAccessor(isolate, name, &ScriptIdGetter, nullptr, attributes);
return MakeAccessor(isolate, name, &ScriptIdGetter, nullptr);
}
@ -378,11 +360,9 @@ void Accessors::ScriptNameGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptNameInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->name_string(),
&ScriptNameGetter, nullptr, attributes);
&ScriptNameGetter, nullptr);
}
@ -402,11 +382,9 @@ void Accessors::ScriptSourceGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(source, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptSourceInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptSourceInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->source_string(),
&ScriptSourceGetter, nullptr, attributes);
&ScriptSourceGetter, nullptr);
}
@ -427,13 +405,10 @@ void Accessors::ScriptLineOffsetGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptLineOffsetInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("line_offset")));
return MakeAccessor(isolate, name, &ScriptLineOffsetGetter, nullptr,
attributes);
return MakeAccessor(isolate, name, &ScriptLineOffsetGetter, nullptr);
}
@ -454,12 +429,10 @@ void Accessors::ScriptTypeGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptTypeInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptTypeInfo(Isolate* isolate) {
Handle<String> name(
isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("type")));
return MakeAccessor(isolate, name, &ScriptTypeGetter, nullptr, attributes);
return MakeAccessor(isolate, name, &ScriptTypeGetter, nullptr);
}
@ -480,13 +453,11 @@ void Accessors::ScriptCompilationTypeGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptCompilationTypeInfo(
Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("compilation_type")));
return MakeAccessor(isolate, name, &ScriptCompilationTypeGetter, nullptr,
attributes);
return MakeAccessor(isolate, name, &ScriptCompilationTypeGetter, nullptr);
}
@ -506,11 +477,10 @@ void Accessors::ScriptSourceUrlGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptSourceUrlInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate, isolate->factory()->source_url_string(),
&ScriptSourceUrlGetter, nullptr, attributes);
Handle<AccessorInfo> Accessors::MakeScriptSourceUrlInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("source_url")));
return MakeAccessor(isolate, name, &ScriptSourceUrlGetter, nullptr);
}
@ -531,11 +501,11 @@ void Accessors::ScriptSourceMappingUrlGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate, isolate->factory()->source_mapping_url_string(),
&ScriptSourceMappingUrlGetter, nullptr, attributes);
Handle<AccessorInfo> Accessors::MakeScriptSourceMappingUrlInfo(
Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("source_mapping_url")));
return MakeAccessor(isolate, name, &ScriptSourceMappingUrlGetter, nullptr);
}
@ -555,13 +525,10 @@ void Accessors::ScriptContextDataGetter(
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
}
Handle<AccessorInfo> Accessors::ScriptContextDataInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptContextDataInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("context_data")));
return MakeAccessor(isolate, name, &ScriptContextDataGetter, nullptr,
attributes);
return MakeAccessor(isolate, name, &ScriptContextDataGetter, nullptr);
}
@ -591,13 +558,10 @@ void Accessors::ScriptEvalFromScriptGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptEvalFromScriptInfo(Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_script")));
return MakeAccessor(isolate, name, &ScriptEvalFromScriptGetter, nullptr,
attributes);
return MakeAccessor(isolate, name, &ScriptEvalFromScriptGetter, nullptr);
}
@ -621,13 +585,12 @@ void Accessors::ScriptEvalFromScriptPositionGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptEvalFromScriptPositionInfo(
Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_script_position")));
return MakeAccessor(isolate, name, &ScriptEvalFromScriptPositionGetter,
nullptr, attributes);
nullptr);
}
@ -654,13 +617,12 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeScriptEvalFromFunctionNameInfo(
Isolate* isolate) {
Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_CHAR_VECTOR("eval_from_function_name")));
return MakeAccessor(isolate, name, &ScriptEvalFromFunctionNameGetter, nullptr,
attributes);
return MakeAccessor(isolate, name, &ScriptEvalFromFunctionNameGetter,
nullptr);
}
@ -704,14 +666,9 @@ void Accessors::FunctionPrototypeSetter(
info.GetReturnValue().Set(true);
}
Handle<AccessorInfo> Accessors::FunctionPrototypeInfo(
Isolate* isolate, PropertyAttributes attributes) {
return MakeAccessor(isolate,
isolate->factory()->prototype_string(),
&FunctionPrototypeGetter,
&FunctionPrototypeSetter,
attributes);
Handle<AccessorInfo> Accessors::MakeFunctionPrototypeInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->prototype_string(),
&FunctionPrototypeGetter, &FunctionPrototypeSetter);
}
@ -724,6 +681,7 @@ void Accessors::FunctionLengthGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionLengthGetter);
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
@ -735,11 +693,9 @@ void Accessors::FunctionLengthGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::FunctionLengthInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeFunctionLengthInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->length_string(),
&FunctionLengthGetter, &ReconfigureToDataProperty,
attributes);
&FunctionLengthGetter, &ReconfigureToDataProperty);
}
@ -759,11 +715,9 @@ void Accessors::FunctionNameGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::FunctionNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeFunctionNameInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->name_string(),
&FunctionNameGetter, &ReconfigureToDataProperty,
attributes);
&FunctionNameGetter, &ReconfigureToDataProperty);
}
@ -771,12 +725,11 @@ Handle<AccessorInfo> Accessors::FunctionNameInfo(
// Accessors::FunctionArguments
//
namespace {
static Handle<Object> ArgumentsForInlinedFunction(
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
Isolate* isolate = inlined_function->GetIsolate();
Handle<JSObject> ArgumentsForInlinedFunction(JavaScriptFrame* frame,
int inlined_frame_index) {
Isolate* isolate = frame->isolate();
Factory* factory = isolate->factory();
TranslatedState translated_values(frame);
@ -788,7 +741,9 @@ static Handle<Object> ArgumentsForInlinedFunction(
&argument_count);
TranslatedFrame::iterator iter = translated_frame->begin();
// Skip the function.
// Materialize the function.
bool should_deoptimize = iter->IsMaterializedObject();
Handle<JSFunction> function = Handle<JSFunction>::cast(iter->GetValue());
iter++;
// Skip the receiver.
@ -796,9 +751,8 @@ static Handle<Object> ArgumentsForInlinedFunction(
argument_count--;
Handle<JSObject> arguments =
factory->NewArgumentsObject(inlined_function, argument_count);
factory->NewArgumentsObject(function, argument_count);
Handle<FixedArray> array = factory->NewFixedArray(argument_count);
bool should_deoptimize = false;
for (int i = 0; i < argument_count; ++i) {
// If we materialize any object, we should deoptimize the frame because we
// might alias an object that was eliminated by escape analysis.
@ -817,9 +771,7 @@ static Handle<Object> ArgumentsForInlinedFunction(
return arguments;
}
static int FindFunctionInFrame(JavaScriptFrame* frame,
Handle<JSFunction> function) {
int FindFunctionInFrame(JavaScriptFrame* frame, Handle<JSFunction> function) {
std::vector<FrameSummary> frames;
frame->Summarize(&frames);
for (size_t i = frames.size(); i != 0; i--) {
@ -830,69 +782,66 @@ static int FindFunctionInFrame(JavaScriptFrame* frame,
return -1;
}
Handle<JSObject> GetFrameArguments(Isolate* isolate,
JavaScriptFrameIterator* it,
int function_index) {
JavaScriptFrame* frame = it->frame();
namespace {
Handle<Object> GetFunctionArguments(Isolate* isolate,
Handle<JSFunction> function) {
// Find the top invocation of the function by traversing frames.
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
int function_index = FindFunctionInFrame(frame, function);
if (function_index < 0) continue;
if (function_index > 0) {
// The function in question was inlined. Inlined functions have the
// correct number of arguments and no allocated arguments object, so
// we can construct a fresh one by interpreting the function's
// deoptimization input data.
return ArgumentsForInlinedFunction(frame, function, function_index);
}
// Find the frame that holds the actual arguments passed to the function.
if (it.frame()->has_adapted_arguments()) {
it.AdvanceOneFrame();
DCHECK(it.frame()->is_arguments_adaptor());
}
frame = it.frame();
// Get the number of arguments and construct an arguments object
// mirror for the right frame.
const int length = frame->ComputeParametersCount();
Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject(
function, length);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
// Copy the parameters to the arguments object.
DCHECK(array->length() == length);
for (int i = 0; i < length; i++) {
Object* value = frame->GetParameter(i);
if (value->IsTheHole(isolate)) {
// Generators currently use holes as dummy arguments when resuming. We
// must not leak those.
DCHECK(IsResumableFunction(function->shared()->kind()));
value = isolate->heap()->undefined_value();
}
array->set(i, value);
}
arguments->set_elements(*array);
// Return the freshly allocated arguments object.
return arguments;
if (function_index > 0) {
// The function in question was inlined. Inlined functions have the
// correct number of arguments and no allocated arguments object, so
// we can construct a fresh one by interpreting the function's
// deoptimization input data.
return ArgumentsForInlinedFunction(frame, function_index);
}
// No frame corresponding to the given function found. Return null.
return isolate->factory()->null_value();
// Find the frame that holds the actual arguments passed to the function.
if (it->frame()->has_adapted_arguments()) {
it->AdvanceOneFrame();
DCHECK(it->frame()->is_arguments_adaptor());
}
frame = it->frame();
// Get the number of arguments and construct an arguments object
// mirror for the right frame and the underlying function.
const int length = frame->ComputeParametersCount();
Handle<JSFunction> function(frame->function(), isolate);
Handle<JSObject> arguments =
isolate->factory()->NewArgumentsObject(function, length);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
// Copy the parameters to the arguments object.
DCHECK(array->length() == length);
for (int i = 0; i < length; i++) {
Object* value = frame->GetParameter(i);
if (value->IsTheHole(isolate)) {
// Generators currently use holes as dummy arguments when resuming. We
// must not leak those.
DCHECK(IsResumableFunction(function->shared()->kind()));
value = isolate->heap()->undefined_value();
}
array->set(i, value);
}
arguments->set_elements(*array);
// Return the freshly allocated arguments object.
return arguments;
}
} // namespace
Handle<JSObject> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
Handle<Object> arguments =
GetFunctionArguments(function->GetIsolate(), function);
CHECK(arguments->IsJSObject());
return Handle<JSObject>::cast(arguments);
Handle<JSObject> Accessors::FunctionGetArguments(JavaScriptFrame* frame,
int inlined_jsframe_index) {
Isolate* isolate = frame->isolate();
Address requested_frame_fp = frame->fp();
// Forward a frame iterator to the requested frame. This is needed because we
// potentially need to advance it to the arguments adaptor frame later.
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
if (it.frame()->fp() != requested_frame_fp) continue;
return GetFrameArguments(isolate, &it, inlined_jsframe_index);
}
UNREACHABLE(); // Requested frame not found.
return Handle<JSObject>();
}
@ -903,18 +852,24 @@ void Accessors::FunctionArgumentsGetter(
HandleScope scope(isolate);
Handle<JSFunction> function =
Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
Handle<Object> result =
function->shared()->native()
? Handle<Object>::cast(isolate->factory()->null_value())
: GetFunctionArguments(isolate, function);
Handle<Object> result = isolate->factory()->null_value();
if (!function->shared()->native()) {
// Find the top invocation of the function by traversing frames.
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
int function_index = FindFunctionInFrame(frame, function);
if (function_index >= 0) {
result = GetFrameArguments(isolate, &it, function_index);
break;
}
}
}
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::FunctionArgumentsInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeFunctionArgumentsInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->arguments_string(),
&FunctionArgumentsGetter, nullptr, attributes);
&FunctionArgumentsGetter, nullptr);
}
@ -1088,11 +1043,9 @@ void Accessors::FunctionCallerGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::FunctionCallerInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeFunctionCallerInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->caller_string(),
&FunctionCallerGetter, nullptr, attributes);
&FunctionCallerGetter, nullptr);
}
@ -1118,11 +1071,9 @@ void Accessors::BoundFunctionLengthGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::BoundFunctionLengthInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeBoundFunctionLengthInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->length_string(),
&BoundFunctionLengthGetter, &ReconfigureToDataProperty,
attributes);
&BoundFunctionLengthGetter, &ReconfigureToDataProperty);
}
//
@ -1145,11 +1096,9 @@ void Accessors::BoundFunctionNameGetter(
info.GetReturnValue().Set(Utils::ToLocal(result));
}
Handle<AccessorInfo> Accessors::BoundFunctionNameInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<AccessorInfo> Accessors::MakeBoundFunctionNameInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->name_string(),
&BoundFunctionNameGetter, &ReconfigureToDataProperty,
attributes);
&BoundFunctionNameGetter, &ReconfigureToDataProperty);
}
//
@ -1163,7 +1112,8 @@ MaybeHandle<JSReceiver> ClearInternalStackTrace(Isolate* isolate,
RETURN_ON_EXCEPTION(
isolate,
JSReceiver::SetProperty(error, isolate->factory()->stack_trace_symbol(),
isolate->factory()->undefined_value(), STRICT),
isolate->factory()->undefined_value(),
LanguageMode::kStrict),
JSReceiver);
return error;
}
@ -1261,12 +1211,9 @@ void Accessors::ErrorStackSetter(
Accessors::ReconfigureToDataProperty(name, val, info);
}
Handle<AccessorInfo> Accessors::ErrorStackInfo(Isolate* isolate,
PropertyAttributes attributes) {
Handle<AccessorInfo> info =
MakeAccessor(isolate, isolate->factory()->stack_string(),
&ErrorStackGetter, &ErrorStackSetter, attributes);
return info;
Handle<AccessorInfo> Accessors::MakeErrorStackInfo(Isolate* isolate) {
return MakeAccessor(isolate, isolate->factory()->stack_string(),
&ErrorStackGetter, &ErrorStackSetter);
}
} // namespace internal


@ -17,34 +17,36 @@ namespace internal {
class AccessorInfo;
template <typename T>
class Handle;
class FieldIndex;
class JavaScriptFrame;
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
#define ACCESSOR_INFO_LIST(V) \
V(ArgumentsIterator) \
V(ArrayLength) \
V(BoundFunctionLength) \
V(BoundFunctionName) \
V(ErrorStack) \
V(FunctionArguments) \
V(FunctionCaller) \
V(FunctionName) \
V(FunctionLength) \
V(FunctionPrototype) \
V(ScriptColumnOffset) \
V(ScriptCompilationType) \
V(ScriptContextData) \
V(ScriptEvalFromScript) \
V(ScriptEvalFromScriptPosition) \
V(ScriptEvalFromFunctionName) \
V(ScriptId) \
V(ScriptLineOffset) \
V(ScriptName) \
V(ScriptSource) \
V(ScriptType) \
V(ScriptSourceUrl) \
V(ScriptSourceMappingUrl) \
V(StringLength)
#define ACCESSOR_INFO_LIST(V) \
V(arguments_iterator, ArgumentsIterator) \
V(array_length, ArrayLength) \
V(bound_function_length, BoundFunctionLength) \
V(bound_function_name, BoundFunctionName) \
V(error_stack, ErrorStack) \
V(function_arguments, FunctionArguments) \
V(function_caller, FunctionCaller) \
V(function_name, FunctionName) \
V(function_length, FunctionLength) \
V(function_prototype, FunctionPrototype) \
V(script_column_offset, ScriptColumnOffset) \
V(script_compilation_type, ScriptCompilationType) \
V(script_context_data, ScriptContextData) \
V(script_eval_from_script, ScriptEvalFromScript) \
V(script_eval_from_script_position, ScriptEvalFromScriptPosition) \
V(script_eval_from_function_name, ScriptEvalFromFunctionName) \
V(script_id, ScriptId) \
V(script_line_offset, ScriptLineOffset) \
V(script_name, ScriptName) \
V(script_source, ScriptSource) \
V(script_type, ScriptType) \
V(script_source_url, ScriptSourceUrl) \
V(script_source_mapping_url, ScriptSourceMappingUrl) \
V(string_length, StringLength)
#define ACCESSOR_SETTER_LIST(V) \
V(ArrayLengthSetter) \
@ -57,45 +59,36 @@ class Handle;
class Accessors : public AllStatic {
public:
// Accessor descriptors.
#define ACCESSOR_INFO_DECLARATION(name) \
static void name##Getter( \
v8::Local<v8::Name> name, \
const v8::PropertyCallbackInfo<v8::Value>& info); \
static Handle<AccessorInfo> name##Info( \
Isolate* isolate, \
PropertyAttributes attributes);
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
#define ACCESSOR_GETTER_DECLARATION(accessor_name, AccessorName) \
static void AccessorName##Getter( \
v8::Local<v8::Name> name, \
const v8::PropertyCallbackInfo<v8::Value>& info);
ACCESSOR_INFO_LIST(ACCESSOR_GETTER_DECLARATION)
#undef ACCESSOR_GETTER_DECLARATION
#define ACCESSOR_SETTER_DECLARATION(name) \
static void name(v8::Local<v8::Name> name, v8::Local<v8::Value> value, \
const v8::PropertyCallbackInfo<v8::Boolean>& info);
#define ACCESSOR_SETTER_DECLARATION(accessor_name) \
static void accessor_name( \
v8::Local<v8::Name> name, v8::Local<v8::Value> value, \
const v8::PropertyCallbackInfo<v8::Boolean>& info);
ACCESSOR_SETTER_LIST(ACCESSOR_SETTER_DECLARATION)
#undef ACCESSOR_SETTER_DECLARATION
static void ModuleNamespaceEntryGetter(
v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info);
static Handle<AccessorInfo> ModuleNamespaceEntryInfo(
Isolate* isolate, Handle<String> name, PropertyAttributes attributes);
static Handle<AccessorInfo> MakeModuleNamespaceEntryInfo(Isolate* isolate,
Handle<String> name);
enum DescriptorId {
#define ACCESSOR_INFO_DECLARATION(name) \
k##name##Getter, \
k##name##Setter,
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
descriptorCount
};
// Accessor functions called directly from the runtime system.
static Handle<JSObject> FunctionGetArguments(Handle<JSFunction> object);
// Accessor function called directly from the runtime system. Returns the
// newly materialized arguments object for the given {frame}. Note that for
// optimized frames it is possible to specify an {inlined_jsframe_index}.
static Handle<JSObject> FunctionGetArguments(JavaScriptFrame* frame,
int inlined_jsframe_index);
// Returns true for properties that are accessors to object fields.
// If true, *object_offset contains offset of object field.
// If true, the matching FieldIndex is returned through |field_index|.
static bool IsJSObjectFieldAccessor(Handle<Map> map, Handle<Name> name,
int* object_offset);
FieldIndex* field_index);
// Create an AccessorInfo. The setter is optional (can be nullptr).
//
@ -111,7 +104,15 @@ class Accessors : public AllStatic {
static Handle<AccessorInfo> MakeAccessor(
Isolate* isolate, Handle<Name> name, AccessorNameGetterCallback getter,
AccessorNameBooleanSetterCallback setter, PropertyAttributes attributes);
AccessorNameBooleanSetterCallback setter);
private:
#define ACCESSOR_INFO_DECLARATION(accessor_name, AccessorName) \
static Handle<AccessorInfo> Make##AccessorName##Info(Isolate* isolate);
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
#undef ACCESSOR_INFO_DECLARATION
friend class Heap;
};
} // namespace internal


@ -12,7 +12,7 @@ namespace internal {
RootIndexMap::RootIndexMap(Isolate* isolate) {
map_ = isolate->root_index_map();
if (map_ != NULL) return;
if (map_ != nullptr) return;
map_ = new HeapObjectToIndexHashMap();
for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);


@ -6,11 +6,8 @@
#include <stdlib.h> // For free, malloc.
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/flags.h"
#include "src/utils.h"
#include "src/v8.h"
@ -106,28 +103,33 @@ void AlignedFree(void *ptr) {
#endif
}
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint)
: address_(base::OS::ReserveRegion(size, hint)), size_(size) {
#if defined(LEAK_SANITIZER)
__lsan_register_root_region(address_, size_);
#endif
byte* AllocateSystemPage(void* address, size_t* allocated) {
size_t page_size = base::OS::AllocatePageSize();
void* result = base::OS::Allocate(address, page_size, page_size,
base::OS::MemoryPermission::kReadWrite);
if (result != nullptr) *allocated = page_size;
return static_cast<byte*>(result);
}
VirtualMemory::VirtualMemory(size_t size, size_t alignment, void* hint)
VirtualMemory::VirtualMemory() : address_(nullptr), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
: address_(nullptr), size_(0) {
address_ = base::OS::ReserveAlignedRegion(size, alignment, hint, &size_);
size_t page_size = base::OS::AllocatePageSize();
size_t alloc_size = RoundUp(size, page_size);
address_ = base::OS::Allocate(hint, alloc_size, alignment,
base::OS::MemoryPermission::kNoAccess);
if (address_ != nullptr) {
size_ = alloc_size;
#if defined(LEAK_SANITIZER)
__lsan_register_root_region(address_, size_);
__lsan_register_root_region(address_, size_);
#endif
}
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
bool result = base::OS::ReleaseRegion(address(), size());
DCHECK(result);
USE(result);
Free();
}
}
@ -136,24 +138,19 @@ void VirtualMemory::Reset() {
size_ = 0;
}
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
bool VirtualMemory::SetPermissions(void* address, size_t size,
base::OS::MemoryPermission access) {
CHECK(InVM(address, size));
return base::OS::CommitRegion(address, size, is_executable);
bool result = base::OS::SetPermissions(address, size, access);
DCHECK(result);
USE(result);
return result;
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
CHECK(InVM(address, size));
return base::OS::UncommitRegion(address, size);
}
bool VirtualMemory::Guard(void* address) {
CHECK(InVM(address, base::OS::CommitPageSize()));
base::OS::Guard(address, base::OS::CommitPageSize());
return true;
}
size_t VirtualMemory::ReleasePartial(void* free_start) {
size_t VirtualMemory::Release(void* free_start) {
DCHECK(IsReserved());
DCHECK(IsAddressAligned(static_cast<Address>(free_start),
base::OS::CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
const size_t free_size = size_ - (reinterpret_cast<size_t>(free_start) -
@ -166,14 +163,12 @@ size_t VirtualMemory::ReleasePartial(void* free_start) {
__lsan_unregister_root_region(address_, size_);
__lsan_register_root_region(address_, size_ - free_size);
#endif
const bool result = base::OS::ReleasePartialRegion(free_start, free_size);
USE(result);
DCHECK(result);
CHECK(base::OS::Release(free_start, free_size));
size_ -= free_size;
return free_size;
}
void VirtualMemory::Release() {
void VirtualMemory::Free() {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
@ -181,9 +176,10 @@ void VirtualMemory::Release() {
size_t size = size_;
CHECK(InVM(address, size));
Reset();
bool result = base::OS::ReleaseRegion(address, size);
USE(result);
DCHECK(result);
#if defined(LEAK_SANITIZER)
__lsan_unregister_root_region(address, size);
#endif
CHECK(base::OS::Free(address, size));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
@ -208,116 +204,17 @@ bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result) {
VirtualMemory first_try(size, alignment, hint);
VirtualMemory first_try(size, hint, alignment);
if (first_try.IsReserved()) {
result->TakeControl(&first_try);
return true;
}
V8::GetCurrentPlatform()->OnCriticalMemoryPressure();
VirtualMemory second_try(size, alignment, hint);
VirtualMemory second_try(size, hint, alignment);
result->TakeControl(&second_try);
return result->IsReserved();
}
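For reference, a standalone sketch of the retry pattern above: the first reservation attempt is retried once after the embedder has been told about memory pressure. The helper names here (try_reserve, notify_pressure) are illustrative stand-ins, not V8 APIs.
#include <cstddef>
#include <functional>
// Attempt a reservation, notify the platform on failure, then retry once.
void* ReserveWithRetry(size_t size,
                       const std::function<void*(size_t)>& try_reserve,
                       const std::function<void()>& notify_pressure) {
  if (void* first = try_reserve(size)) return first;  // first attempt
  notify_pressure();  // analogous to OnCriticalMemoryPressure() above
  return try_reserve(size);  // second and final attempt; may still be null
}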
namespace {
struct RNGInitializer {
static void Construct(void* mem) {
auto rng = new (mem) base::RandomNumberGenerator();
int64_t random_seed = FLAG_random_seed;
if (random_seed) {
rng->SetSeed(random_seed);
}
}
};
} // namespace
static base::LazyInstance<base::RandomNumberGenerator, RNGInitializer>::type
random_number_generator = LAZY_INSTANCE_INITIALIZER;
void* GetRandomMmapAddr() {
#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
defined(THREAD_SANITIZER)
// Dynamic tools do not support custom mmap addresses.
return NULL;
#endif
uintptr_t raw_addr;
random_number_generator.Pointer()->NextBytes(&raw_addr, sizeof(raw_addr));
#if V8_OS_POSIX
#if V8_TARGET_ARCH_X64
// Currently available CPUs have 48 bits of virtual addressing. Truncate
// the hint address to 46 bits to give the kernel a fighting chance of
// fulfilling our placement request.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
// AIX: 64 bits of virtual addressing, but we limit address range to:
// a) minimize Segment Lookaside Buffer (SLB) misses and
raw_addr &= V8_UINT64_C(0x3ffff000);
// Use extra address space to isolate the mmap regions.
raw_addr += V8_UINT64_C(0x400000000000);
#elif V8_TARGET_BIG_ENDIAN
// Big-endian Linux: 44 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x03fffffff000);
#else
// Little-endian Linux: 48 bits of virtual addressing.
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#endif
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to allow kernel chance to
// fulfill request.
raw_addr &= V8_UINT64_C(0xfffffff000);
#elif V8_TARGET_ARCH_S390
// 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
// to fulfill request.
raw_addr &= 0x1ffff000;
#else
raw_addr &= 0x3ffff000;
#ifdef __sun
// For our Solaris/illumos mmap hint, we pick a random address in the bottom
// half of the top half of the address space (that is, the third quarter).
// Because we do not MAP_FIXED, this will be treated only as a hint -- the
// system will not fail to mmap() because something else happens to already
// be mapped at our random address. We deliberately set the hint high enough
// to get well above the system's break (that is, the heap); Solaris and
// illumos will try the hint and if that fails allocate as if there were
// no hint at all. The high hint prevents the break from getting hemmed in
// at low values, ceding half of the address space to the system heap.
raw_addr += 0x80000000;
#elif V8_OS_AIX
// The range 0x30000000 - 0xD0000000 is available on AIX;
// choose the upper range.
raw_addr += 0x90000000;
#else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
raw_addr += 0x20000000;
#endif
#endif
#else // V8_OS_WIN
// The address range used to randomize RWX allocations in OS::Allocate
// Try not to map pages into the default range that windows loads DLLs
// Use a multiple of 64k to prevent committing unused memory.
// Note: This does not guarantee RWX regions will be within the
// range kAllocationRandomAddressMin to kAllocationRandomAddressMax
#ifdef V8_HOST_ARCH_64_BIT
static const uintptr_t kAllocationRandomAddressMin = 0x0000000080000000;
static const uintptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
#else
static const uintptr_t kAllocationRandomAddressMin = 0x04000000;
static const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
#endif
raw_addr <<= kPageSizeBits;
raw_addr += kAllocationRandomAddressMin;
raw_addr &= kAllocationRandomAddressMax;
#endif // V8_OS_WIN
return reinterpret_cast<void*>(raw_addr);
}
} // namespace internal
} // namespace v8
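For reference, a standalone C++ sketch of the x64 branch of the helper above: random bits are masked so that the resulting mmap hint is page-aligned and stays within 46 bits of virtual address space. It only illustrates the masking; it is not the V8 implementation.
#include <cstdint>
#include <cstdio>
#include <random>
void* RandomMmapHintX64() {
  static std::mt19937_64 rng{std::random_device{}()};
  uint64_t raw = rng();
  raw &= uint64_t{0x3ffffffff000};  // keep bits 12..45: page-aligned, below 2^46
  return reinterpret_cast<void*>(raw);
}
int main() {
  std::printf("hint = %p\n", RandomMmapHintX64());
  return 0;
}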

View File

@ -76,19 +76,22 @@ class FreeStoreAllocationPolicy {
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
// Allocates a single system memory page with read/write permissions. The
// address parameter is a hint. Returns the base address of the memory, or null
// on failure. Permissions can be changed on the base address.
byte* AllocateSystemPage(void* address, size_t* allocated);
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
public:
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory();
// Reserves virtual memory with size.
explicit VirtualMemory(size_t size, void* hint);
// Reserves virtual memory containing an area of the given size that
// is aligned per alignment. This may not be at the position returned
// by address().
VirtualMemory(size_t size, size_t alignment, void* hint);
// Reserves virtual memory containing an area of the given size that is
// aligned per alignment. This may not be at the position returned by
// address().
VirtualMemory(size_t size, void* hint,
size_t alignment = base::OS::AllocatePageSize());
// Construct a virtual memory by assigning it some already mapped address
// and size.
@ -125,19 +128,16 @@ class V8_EXPORT_PRIVATE VirtualMemory {
// than the requested size.
size_t size() const { return size_; }
// Commits real memory. Returns whether the operation succeeded.
bool Commit(void* address, size_t size, bool is_executable);
// Sets permissions according to the access argument. address and size must be
// multiples of CommitPageSize(). Returns true on success, otherwise false.
bool SetPermissions(void* address, size_t size,
base::OS::MemoryPermission access);
// Uncommit real memory. Returns whether the operation succeeded.
bool Uncommit(void* address, size_t size);
// Releases memory after |free_start|. Returns the number of bytes released.
size_t Release(void* free_start);
// Creates a single guard page at the given address.
bool Guard(void* address);
// Releases the memory after |free_start|. Returns the bytes released.
size_t ReleasePartial(void* free_start);
void Release();
// Frees all memory.
void Free();
// Assign control of the reserved region to a different VirtualMemory object.
// The old object is no longer functional (IsReserved() returns false).
@ -159,9 +159,6 @@ bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result);
// Generate a random address to be used for hinting mmap().
V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
} // namespace internal
} // namespace v8
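For reference, a POSIX-only sketch of the lifecycle the revised class describes: reserve address space with no access, commit pages by changing their permissions, trim the tail, then free the rest. It illustrates the contract with plain mmap/mprotect/munmap and is not the V8 implementation.
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstddef>
int main() {
  const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  const size_t size = 16 * page;
  // Reserve: like VirtualMemory(size, hint), nothing is committed yet.
  void* base = mmap(nullptr, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);
  // SetPermissions(base, 4 * page, kReadWrite): commit the first four pages.
  assert(mprotect(base, 4 * page, PROT_READ | PROT_WRITE) == 0);
  static_cast<char*>(base)[0] = 1;
  // Release(free_start): return everything after the first eight pages.
  char* free_start = static_cast<char*>(base) + 8 * page;
  assert(munmap(free_start, size - 8 * page) == 0);
  // Free(): release the remainder of the reservation.
  assert(munmap(base, 8 * page) == 0);
  return 0;
}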

View File

@ -80,7 +80,7 @@ class PropertyCallbackArguments
static const int kShouldThrowOnErrorIndex = T::kShouldThrowOnErrorIndex;
PropertyCallbackArguments(Isolate* isolate, Object* data, Object* self,
JSObject* holder, Object::ShouldThrow should_throw)
JSObject* holder, ShouldThrow should_throw)
: Super(isolate) {
Object** values = this->begin();
values[T::kThisIndex] = self;
@ -88,7 +88,7 @@ class PropertyCallbackArguments
values[T::kDataIndex] = data;
values[T::kIsolateIndex] = reinterpret_cast<Object*>(isolate);
values[T::kShouldThrowOnErrorIndex] =
Smi::FromInt(should_throw == Object::THROW_ON_ERROR ? 1 : 0);
Smi::FromInt(should_throw == kThrowOnError ? 1 : 0);
// Here the hole is set as default value.
// It cannot escape into js as it's removed in Call below.
@ -158,8 +158,6 @@ class FunctionCallbackArguments
static const int kReturnValueDefaultValueIndex =
T::kReturnValueDefaultValueIndex;
static const int kIsolateIndex = T::kIsolateIndex;
static const int kCalleeIndex = T::kCalleeIndex;
static const int kContextSaveIndex = T::kContextSaveIndex;
static const int kNewTargetIndex = T::kNewTargetIndex;
FunctionCallbackArguments(internal::Isolate* isolate, internal::Object* data,
@ -170,18 +168,14 @@ class FunctionCallbackArguments
: Super(isolate), argv_(argv), argc_(argc) {
Object** values = begin();
values[T::kDataIndex] = data;
values[T::kCalleeIndex] = callee;
values[T::kHolderIndex] = holder;
values[T::kNewTargetIndex] = new_target;
values[T::kContextSaveIndex] = isolate->heap()->the_hole_value();
values[T::kIsolateIndex] = reinterpret_cast<internal::Object*>(isolate);
// Here the hole is set as default value.
// It cannot escape into js as it's removed in Call below.
values[T::kReturnValueDefaultValueIndex] =
isolate->heap()->the_hole_value();
values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
DCHECK(values[T::kCalleeIndex]->IsJSFunction() ||
values[T::kCalleeIndex]->IsFunctionTemplateInfo());
DCHECK(values[T::kHolderIndex]->IsHeapObject());
DCHECK(values[T::kIsolateIndex]->IsSmi());
}

View File

@ -112,7 +112,7 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
#endif
MAYBE_RETURN_NULL(
Object::AddDataProperty(&it, value, attributes, Object::THROW_ON_ERROR,
Object::AddDataProperty(&it, value, attributes, kThrowOnError,
Object::CERTAINLY_NOT_STORE_FROM_KEYED));
return value;
}
@ -212,7 +212,10 @@ MaybeHandle<JSObject> ConfigureInstance(Isolate* isolate, Handle<JSObject> obj,
// Install accumulated accessors.
for (int i = 0; i < valid_descriptors; i++) {
Handle<AccessorInfo> accessor(AccessorInfo::cast(array->get(i)));
JSObject::SetAccessor(obj, accessor).Assert();
Handle<Name> name(Name::cast(accessor->name()), isolate);
JSObject::SetAccessor(obj, name, accessor,
accessor->initial_property_attributes())
.Assert();
}
}
@ -282,10 +285,10 @@ MaybeHandle<JSObject> ProbeInstantiationsCache(Isolate* isolate,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<UnseededNumberDictionary> slow_cache =
Handle<NumberDictionary> slow_cache =
isolate->slow_template_instantiations_cache();
int entry = slow_cache->FindEntry(serial_number);
if (entry == UnseededNumberDictionary::kNotFound) {
if (entry == NumberDictionary::kNotFound) {
return MaybeHandle<JSObject>();
}
return handle(JSObject::cast(slow_cache->ValueAt(entry)), isolate);
@ -310,10 +313,9 @@ void CacheTemplateInstantiation(Isolate* isolate, int serial_number,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<UnseededNumberDictionary> cache =
Handle<NumberDictionary> cache =
isolate->slow_template_instantiations_cache();
auto new_cache =
UnseededNumberDictionary::Set(cache, serial_number, object);
auto new_cache = NumberDictionary::Set(cache, serial_number, object);
if (*new_cache != *cache) {
isolate->native_context()->set_slow_template_instantiations_cache(
*new_cache);
@ -332,11 +334,11 @@ void UncacheTemplateInstantiation(Isolate* isolate, int serial_number,
} else if (caching_mode == CachingMode::kUnlimited ||
(serial_number <=
TemplateInfo::kSlowTemplateInstantiationsCacheSize)) {
Handle<UnseededNumberDictionary> cache =
Handle<NumberDictionary> cache =
isolate->slow_template_instantiations_cache();
int entry = cache->FindEntry(serial_number);
DCHECK_NE(UnseededNumberDictionary::kNotFound, entry);
cache = UnseededNumberDictionary::DeleteEntry(cache, entry);
DCHECK_NE(NumberDictionary::kNotFound, entry);
cache = NumberDictionary::DeleteEntry(cache, entry);
isolate->native_context()->set_slow_template_instantiations_cache(*cache);
}
}
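For reference, a minimal sketch of the slow-cache bookkeeping shown above: instantiations are keyed by the template's serial number and are only cached up to a size limit unless caching is unlimited. The types are simplified stand-ins for the NumberDictionary-backed cache.
#include <unordered_map>
enum class CachingMode { kLimited, kUnlimited };
class InstantiationCache {
 public:
  explicit InstantiationCache(CachingMode mode, int limit = 1000)
      : mode_(mode), limit_(limit) {}
  // Probe: returns the cached object or nullptr, like ProbeInstantiationsCache.
  const void* Probe(int serial_number) const {
    auto it = entries_.find(serial_number);
    return it == entries_.end() ? nullptr : it->second;
  }
  // Cache: store only when allowed, like CacheTemplateInstantiation.
  void Cache(int serial_number, const void* object) {
    if (mode_ == CachingMode::kUnlimited || serial_number <= limit_)
      entries_[serial_number] = object;
  }
  // Uncache: drop the entry, like UncacheTemplateInstantiation.
  void Uncache(int serial_number) { entries_.erase(serial_number); }
 private:
  CachingMode mode_;
  int limit_;
  std::unordered_map<int, const void*> entries_;
};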
@ -631,17 +633,16 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
shared, isolate->native_context());
if (obj->remove_prototype()) {
result->set_map(*isolate->sloppy_function_without_prototype_map());
DCHECK(prototype.is_null());
DCHECK(result->shared()->IsApiFunction());
DCHECK(!result->has_initial_map());
DCHECK(!result->has_prototype());
DCHECK(!result->IsConstructor());
DCHECK(!result->has_prototype_slot());
return result;
}
// Down from here is only valid for API functions that can be used as a
// constructor (don't set the "remove prototype" flag).
DCHECK(result->has_prototype_slot());
if (obj->read_only_prototype()) {
result->set_map(*isolate->sloppy_function_with_readonly_prototype_map());

756
deps/v8/src/api.cc vendored

File diff suppressed because it is too large

44
deps/v8/src/api.h vendored
View File

@ -108,7 +108,6 @@ class RegisteredExtension {
V(StackTrace, FixedArray) \
V(StackFrame, StackFrameInfo) \
V(Proxy, JSProxy) \
V(NativeWeakMap, JSWeakMap) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
V(Promise, JSPromise) \
@ -208,8 +207,6 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<External> ExternalToLocal(
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<NativeWeakMap> NativeWeakMapToLocal(
v8::internal::Handle<v8::internal::JSWeakMap> obj);
static inline Local<Function> CallableToLocal(
v8::internal::Handle<v8::internal::JSReceiver> obj);
static inline Local<Primitive> ToLocalPrimitive(
@ -332,7 +329,6 @@ MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
MAKE_TO_LOCAL(NativeWeakMapToLocal, JSWeakMap, NativeWeakMap)
MAKE_TO_LOCAL(CallableToLocal, JSReceiver, Function)
MAKE_TO_LOCAL(ToLocalPrimitive, Object, Primitive)
MAKE_TO_LOCAL(ToLocal, FixedArray, PrimitiveArray)
@ -347,8 +343,8 @@ MAKE_TO_LOCAL(ScriptOrModuleToLocal, Script, ScriptOrModule)
#define MAKE_OPEN_HANDLE(From, To) \
v8::internal::Handle<v8::internal::To> Utils::OpenHandle( \
const v8::From* that, bool allow_empty_handle) { \
DCHECK(allow_empty_handle || that != NULL); \
DCHECK(that == NULL || \
DCHECK(allow_empty_handle || that != nullptr); \
DCHECK(that == nullptr || \
(*reinterpret_cast<v8::internal::Object* const*>(that))->Is##To()); \
return v8::internal::Handle<v8::internal::To>( \
reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
@ -370,8 +366,8 @@ class V8_EXPORT_PRIVATE DeferredHandles {
private:
DeferredHandles(Object** first_block_limit, Isolate* isolate)
: next_(NULL),
previous_(NULL),
: next_(nullptr),
previous_(nullptr),
first_block_limit_(first_block_limit),
isolate_(isolate) {
isolate->LinkDeferredHandles(this);
@ -404,7 +400,7 @@ class HandleScopeImplementer {
explicit HandleScopeImplementer(Isolate* isolate)
: isolate_(isolate),
microtask_context_(nullptr),
spare_(NULL),
spare_(nullptr),
call_depth_(0),
microtasks_depth_(0),
microtasks_suppressions_(0),
@ -413,7 +409,7 @@ class HandleScopeImplementer {
debug_microtasks_depth_(0),
#endif
microtasks_policy_(v8::MicrotasksPolicy::kAuto),
last_handle_before_deferred_block_(NULL) {
last_handle_before_deferred_block_(nullptr) {
}
~HandleScopeImplementer() {
@ -487,8 +483,8 @@ class HandleScopeImplementer {
Isolate* isolate() const { return isolate_; }
void ReturnBlock(Object** block) {
DCHECK(block != NULL);
if (spare_ != NULL) DeleteArray(spare_);
DCHECK_NOT_NULL(block);
if (spare_ != nullptr) DeleteArray(spare_);
spare_ = block;
}
@ -499,8 +495,8 @@ class HandleScopeImplementer {
saved_contexts_.detach();
microtask_context_ = nullptr;
entered_context_count_during_microtasks_ = 0;
spare_ = NULL;
last_handle_before_deferred_block_ = NULL;
spare_ = nullptr;
last_handle_before_deferred_block_ = nullptr;
call_depth_ = 0;
}
@ -513,11 +509,11 @@ class HandleScopeImplementer {
blocks_.free();
entered_contexts_.free();
saved_contexts_.free();
if (spare_ != NULL) {
if (spare_ != nullptr) {
DeleteArray(spare_);
spare_ = NULL;
spare_ = nullptr;
}
DCHECK(call_depth_ == 0);
DCHECK_EQ(call_depth_, 0);
}
void BeginDeferredScope();
@ -620,10 +616,10 @@ Handle<Context> HandleScopeImplementer::MicrotaskContext() {
// If there's a spare block, use it for growing the current scope.
internal::Object** HandleScopeImplementer::GetSpareOrNewBlock() {
internal::Object** block = (spare_ != NULL) ?
spare_ :
NewArray<internal::Object*>(kHandleBlockSize);
spare_ = NULL;
internal::Object** block =
(spare_ != nullptr) ? spare_
: NewArray<internal::Object*>(kHandleBlockSize);
spare_ = nullptr;
return block;
}
@ -645,13 +641,13 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
#ifdef ENABLE_HANDLE_ZAPPING
internal::HandleScope::ZapRange(block_start, block_limit);
#endif
if (spare_ != NULL) {
if (spare_ != nullptr) {
DeleteArray(spare_);
}
spare_ = block_start;
}
DCHECK((blocks_.empty() && prev_limit == NULL) ||
(!blocks_.empty() && prev_limit != NULL));
DCHECK((blocks_.empty() && prev_limit == nullptr) ||
(!blocks_.empty() && prev_limit != nullptr));
}
// Interceptor functions called from generated inline caches to notify

View File

@ -67,14 +67,13 @@ void RelocInfo::apply(intptr_t delta) {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
if (Assembler::IsMovW(Memory::int32_at(pc_))) {
return reinterpret_cast<Address>(pc_);
} else {
@ -113,7 +112,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@ -157,9 +156,9 @@ void RelocInfo::WipeOut(Isolate* isolate) {
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
Memory::Address_at(pc_) = nullptr;
} else {
Assembler::set_target_address_at(isolate, pc_, host_, NULL);
Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
}
}
@ -384,14 +383,14 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
}
Address Assembler::target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
Address constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}

View File

@ -250,12 +250,12 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
void CpuFeatures::PrintTarget() {
const char* arm_arch = NULL;
const char* arm_arch = nullptr;
const char* arm_target_type = "";
const char* arm_no_probe = "";
const char* arm_fpu = "";
const char* arm_thumb = "";
const char* arm_float_abi = NULL;
const char* arm_float_abi = nullptr;
#if !defined __arm__
arm_target_type = " simulator";
@ -357,6 +357,17 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
reinterpret_cast<Address>(size), flush_mode);
}
void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
set_embedded_address(isolate, address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
return embedded_address();
}
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-arm-inl.h for inlined constructors
@ -382,7 +393,7 @@ Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
shift_op = LSL;
} else if (shift_op == RRX) {
// encoded as ROR with shift_imm == 0
DCHECK(shift_imm == 0);
DCHECK_EQ(shift_imm, 0);
shift_op_ = ROR;
shift_imm_ = 0;
}
@ -587,7 +598,7 @@ void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
void Assembler::Align(int m) {
DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
while ((pc_offset() & (m - 1)) != 0) {
nop();
}
@ -694,7 +705,7 @@ bool Assembler::IsAddRegisterImmediate(Instr instr) {
Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
DCHECK(IsAddRegisterImmediate(instr));
DCHECK(offset >= 0);
DCHECK_GE(offset, 0);
DCHECK(is_uint12(offset));
// Set the offset.
return (instr & ~kOff12Mask) | offset;
@ -930,14 +941,14 @@ void Assembler::target_at_put(int pos, int target_pos) {
instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
void Assembler::print(Label* L) {
void Assembler::print(const Label* L) {
if (L->is_unused()) {
PrintF("unused label\n");
} else if (L->is_bound()) {
PrintF("bound label to %d\n", L->pos());
} else if (L->is_linked()) {
Label l = *L;
Label l;
l.link_to(L->pos());
PrintF("unbound label");
while (l.is_linked()) {
PrintF("@ %d ", l.pos());
@ -945,7 +956,7 @@ void Assembler::print(Label* L) {
if ((instr & ~kImm24Mask) == 0) {
PrintF("value\n");
} else {
DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx
DCHECK_EQ(instr & 7 * B25, 5 * B25); // b, bl, or blx
Condition cond = Instruction::ConditionField(instr);
const char* b;
const char* c;
@ -1019,7 +1030,7 @@ void Assembler::next(Label* L) {
// chain.
L->Unuse();
} else {
DCHECK(link >= 0);
DCHECK_GE(link, 0);
L->link_to(link);
}
}
@ -1043,9 +1054,9 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
}
// If the opcode is one with a complementary version and the complementary
// immediate fits, change the opcode.
if (instr != NULL) {
if (instr != nullptr) {
if ((*instr & kMovMvnMask) == kMovMvnPattern) {
if (FitsShifter(~imm32, rotate_imm, immed_8, NULL)) {
if (FitsShifter(~imm32, rotate_imm, immed_8, nullptr)) {
*instr ^= kMovMvnFlip;
return true;
} else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
@ -1059,7 +1070,7 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
}
}
} else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8, nullptr)) {
*instr ^= kCmpCmnFlip;
return true;
}
@ -1067,13 +1078,14 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
Instr alu_insn = (*instr & kALUMask);
if (alu_insn == ADD ||
alu_insn == SUB) {
if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
if (FitsShifter(-static_cast<int>(imm32), rotate_imm, immed_8,
nullptr)) {
*instr ^= kAddSubFlip;
return true;
}
} else if (alu_insn == AND ||
alu_insn == BIC) {
if (FitsShifter(~imm32, rotate_imm, immed_8, NULL)) {
if (FitsShifter(~imm32, rotate_imm, immed_8, nullptr)) {
*instr ^= kAndBicFlip;
return true;
}
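For reference, a standalone sketch of the encoding test behind FitsShifter: an ARM data-processing immediate is an 8-bit value rotated right by an even amount, and when a value does not fit, the complementary operation may (for example SUB of -imm instead of ADD of imm, which is what the kAddSubFlip path above exploits). This is an illustration, not the V8 routine.
#include <cstdint>
#include <cstdio>
uint32_t RotateLeft(uint32_t v, unsigned n) {
  n &= 31;
  return n == 0 ? v : (v << n) | (v >> (32 - n));
}
// True if imm is expressible as imm8 ROR (2 * rot) for some rot in [0, 15].
bool FitsRotatedImm8(uint32_t imm) {
  for (unsigned rot = 0; rot < 16; ++rot) {
    if (RotateLeft(imm, 2 * rot) <= 0xff) return true;
  }
  return false;
}
int main() {
  uint32_t imm = 0xffffff00;                    // not encodable directly
  std::printf("%d %d\n", FitsRotatedImm8(imm),  // 0
              FitsRotatedImm8(0u - imm));       // 1: the negation fits, so flip ADD to SUB
  return 0;
}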
@ -1089,7 +1101,7 @@ bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8,
// encoded.
bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
if (assembler != NULL && assembler->predictable_code_size()) return true;
if (assembler != nullptr && assembler->predictable_code_size()) return true;
return assembler->serializer_enabled();
} else if (RelocInfo::IsNone(rmode)) {
return false;
@ -1098,7 +1110,7 @@ bool MustOutputRelocInfo(RelocInfo::Mode rmode, const Assembler* assembler) {
}
bool UseMovImmediateLoad(const Operand& x, const Assembler* assembler) {
DCHECK(assembler != nullptr);
DCHECK_NOT_NULL(assembler);
if (x.MustOutputRelocInfo(assembler)) {
// Prefer constant pool if data is likely to be patched.
return false;
@ -1116,7 +1128,7 @@ bool Operand::MustOutputRelocInfo(const Assembler* assembler) const {
int Operand::InstructionsRequired(const Assembler* assembler,
Instr instr) const {
DCHECK(assembler != nullptr);
DCHECK_NOT_NULL(assembler);
if (rm_.is_valid()) return 1;
uint32_t dummy1, dummy2;
if (MustOutputRelocInfo(assembler) ||
@ -1196,7 +1208,7 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// For move instructions, rn is not defined.
DCHECK(rn.is_valid() || (opcode == MOV) || (opcode == MVN));
DCHECK(rd.is_valid() || rn.is_valid());
DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
DCHECK_EQ(instr & ~(kCondMask | kOpCodeMask | S), 0);
if (!AddrMode1TryEncodeOperand(&instr, x)) {
DCHECK(x.IsImmediate());
// Upon failure to encode, the opcode should not have changed.
@ -1207,6 +1219,26 @@ void Assembler::AddrMode1(Instr instr, Register rd, Register rn,
// pool only for a MOV instruction which does not set the flags.
DCHECK(!rn.is_valid());
Move32BitImmediate(rd, x, cond);
} else if ((opcode == ADD) && !set_flags && (rd == rn) &&
(scratch_register_list_ == 0)) {
// Split the operation into a sequence of additions if we cannot use a
// scratch register. In this case, we cannot re-use rn and the assembler
// does not have any scratch registers to spare.
uint32_t imm = x.immediate();
do {
// The immediate encoding format is composed of 8 bits of data and 4
// bits encoding a rotation. Each of the 16 possible rotations accounts
// for a rotation by an even number.
// 4 bits -> 16 rotations possible
// -> 16 rotations of 2 bits each fits in a 32-bit value.
// This means that finding the even number of trailing zeroes of the
// immediate allows us to more efficiently split it:
int trailing_zeroes = base::bits::CountTrailingZeros(imm) & ~1u;
uint32_t mask = (0xff << trailing_zeroes);
add(rd, rd, Operand(imm & mask), LeaveCC, cond);
imm = imm & ~mask;
} while (!ImmediateFitsAddrMode1Instruction(imm));
add(rd, rd, Operand(imm), LeaveCC, cond);
} else {
// The immediate operand cannot be encoded as a shifter operand, so load
// it first to a scratch register and change the original instruction to
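For reference, a standalone sketch of the splitting performed by the new branch above: peel off 8-bit chunks at even bit positions until the remainder is itself encodable, so that "add rd, rd, #imm" can be emitted as a short sequence of adds without a scratch register. Helper names are illustrative; this is not the assembler code.
#include <cstdint>
#include <cstdio>
#include <vector>
uint32_t RotateLeft(uint32_t v, unsigned n) {
  n &= 31;
  return n == 0 ? v : (v << n) | (v >> (32 - n));
}
bool FitsRotatedImm8(uint32_t imm) {  // 8 bits of data rotated by an even amount
  for (unsigned rot = 0; rot < 16; ++rot)
    if (RotateLeft(imm, 2 * rot) <= 0xff) return true;
  return false;
}
std::vector<uint32_t> SplitImmediate(uint32_t imm) {
  std::vector<uint32_t> chunks;
  while (!FitsRotatedImm8(imm)) {
    unsigned tz = __builtin_ctz(imm) & ~1u;  // even number of trailing zeroes
    uint32_t mask = 0xffu << tz;             // 8 bits of data at that position
    chunks.push_back(imm & mask);
    imm &= ~mask;
  }
  chunks.push_back(imm);  // final remainder is directly encodable
  return chunks;
}
int main() {
  for (uint32_t chunk : SplitImmediate(0x12345678)) std::printf("0x%x\n", chunk);
  return 0;
}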
@ -1283,7 +1315,7 @@ void Assembler::AddrMode2(Instr instr, Register rd, const MemOperand& x) {
AddrMode2(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
}
DCHECK(offset_12 >= 0); // no masking needed
DCHECK_GE(offset_12, 0); // no masking needed
instr |= offset_12;
} else {
// Register offset (shift_imm_ and shift_op_ are 0) or scaled
@ -1320,7 +1352,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
AddrMode3(instr, rd, MemOperand(x.rn_, scratch, x.am_));
return;
}
DCHECK(offset_8 >= 0); // no masking needed
DCHECK_GE(offset_8, 0); // no masking needed
instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
} else if (x.shift_imm_ != 0) {
// Scaled register offsets are not supported, compute the offset separately
@ -1344,7 +1376,7 @@ void Assembler::AddrMode3(Instr instr, Register rd, const MemOperand& x) {
void Assembler::AddrMode4(Instr instr, Register rn, RegList rl) {
DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
DCHECK(rl != 0);
DCHECK_NE(rl, 0);
DCHECK(rn != pc);
emit(instr | rn.code()*B16 | rl);
}
@ -1356,7 +1388,7 @@ void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
int am = x.am_;
int offset_8 = x.offset_;
DCHECK((offset_8 & 3) == 0); // offset must be an aligned word offset
DCHECK_EQ(offset_8 & 3, 0); // offset must be an aligned word offset
offset_8 >>= 2;
if (offset_8 < 0) {
offset_8 = -offset_8;
@ -1369,7 +1401,7 @@ void Assembler::AddrMode5(Instr instr, CRegister crd, const MemOperand& x) {
if ((am & P) == 0)
am |= W;
DCHECK(offset_8 >= 0); // no masking needed
DCHECK_GE(offset_8, 0); // no masking needed
emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
@ -1399,7 +1431,7 @@ int Assembler::branch_offset(Label* L) {
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond) {
DCHECK((branch_offset & 3) == 0);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
emit(cond | B27 | B25 | (imm24 & kImm24Mask));
@ -1412,14 +1444,14 @@ void Assembler::b(int branch_offset, Condition cond) {
void Assembler::bl(int branch_offset, Condition cond) {
DCHECK((branch_offset & 3) == 0);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
void Assembler::blx(int branch_offset) {
DCHECK((branch_offset & 1) == 0);
DCHECK_EQ(branch_offset & 1, 0);
int h = ((branch_offset & 2) >> 1)*B24;
int imm24 = branch_offset >> 2;
CHECK(is_int24(imm24));
@ -2041,7 +2073,7 @@ void Assembler::mrs(Register dst, SRegister s, Condition cond) {
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
Condition cond) {
DCHECK((fields & 0x000f0000) != 0); // At least one field must be set.
DCHECK_NE(fields & 0x000f0000, 0); // At least one field must be set.
DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
Instr instr;
if (src.IsImmediate()) {
@ -2049,7 +2081,7 @@ void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
uint32_t rotate_imm;
uint32_t immed_8;
if (src.MustOutputRelocInfo(this) ||
!FitsShifter(src.immediate(), &rotate_imm, &immed_8, NULL)) {
!FitsShifter(src.immediate(), &rotate_imm, &immed_8, nullptr)) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
// Immediate operand cannot be encoded, load it first to a scratch
@ -2209,7 +2241,7 @@ void Assembler::pld(const MemOperand& address) {
offset = -offset;
U = 0;
}
DCHECK(offset < 4096);
DCHECK_LT(offset, 4096);
emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
0xf*B12 | offset);
}
@ -2250,7 +2282,7 @@ void Assembler::stm(BlockAddrMode am,
// enabling/disabling and a counter feature. See simulator-arm.h .
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
DCHECK(code >= kDefaultStopCode);
DCHECK_GE(code, kDefaultStopCode);
{
BlockConstPoolScope block_const_pool(this);
if (code >= 0) {
@ -2419,14 +2451,14 @@ void Assembler::vldr(const DwVfpRegister dst,
DCHECK(VfpRegisterIsAvailable(dst));
int u = 1;
if (offset < 0) {
CHECK(offset != kMinInt);
CHECK_NE(offset, kMinInt);
offset = -offset;
u = 0;
}
int vd, d;
dst.split_code(&vd, &d);
DCHECK(offset >= 0);
DCHECK_GE(offset, 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
0xB*B8 | ((offset / 4) & 255));
@ -2479,7 +2511,7 @@ void Assembler::vldr(const SwVfpRegister dst,
}
int sd, d;
dst.split_code(&sd, &d);
DCHECK(offset >= 0);
DCHECK_GE(offset, 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
@ -2528,11 +2560,11 @@ void Assembler::vstr(const DwVfpRegister src,
DCHECK(VfpRegisterIsAvailable(src));
int u = 1;
if (offset < 0) {
CHECK(offset != kMinInt);
CHECK_NE(offset, kMinInt);
offset = -offset;
u = 0;
}
DCHECK(offset >= 0);
DCHECK_GE(offset, 0);
int vd, d;
src.split_code(&vd, &d);
@ -2583,13 +2615,13 @@ void Assembler::vstr(const SwVfpRegister src,
// Vdst(15-12) | 1010(11-8) | (offset/4)
int u = 1;
if (offset < 0) {
CHECK(offset != kMinInt);
CHECK_NE(offset, kMinInt);
offset = -offset;
u = 0;
}
int sd, d;
src.split_code(&sd, &d);
DCHECK(offset >= 0);
DCHECK_GE(offset, 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
@ -2638,7 +2670,7 @@ void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
DCHECK(count <= 16);
DCHECK_LE(count, 16);
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@ -2656,7 +2688,7 @@ void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
DCHECK(count <= 16);
DCHECK_LE(count, 16);
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
@ -3510,7 +3542,7 @@ void Assembler::vcmp(const DwVfpRegister src1,
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
DCHECK(VfpRegisterIsAvailable(src1));
DCHECK(src2 == 0.0);
DCHECK_EQ(src2, 0.0);
int vd, d;
src1.split_code(&vd, &d);
emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
@ -3523,7 +3555,7 @@ void Assembler::vcmp(const SwVfpRegister src1, const float src2,
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
// Vd(15-12) | 101(11-9) | sz=0(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
DCHECK(src2 == 0.0);
DCHECK_EQ(src2, 0.0);
int vd, d;
src1.split_code(&vd, &d);
emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x5 * B16 | vd * B12 |
@ -4940,7 +4972,7 @@ Instr Assembler::GetMovWPattern() { return kMovwPattern; }
Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
DCHECK(immediate < 0x10000);
DCHECK_LT(immediate, 0x10000);
return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
}
@ -4961,7 +4993,7 @@ int Assembler::DecodeShiftImm(Instr instr) {
Instr Assembler::PatchShiftImm(Instr instr, int immed) {
uint32_t rotate_imm = 0;
uint32_t immed_8 = 0;
bool immed_fits = FitsShifter(immed, &rotate_imm, &immed_8, NULL);
bool immed_fits = FitsShifter(immed, &rotate_imm, &immed_8, nullptr);
DCHECK(immed_fits);
USE(immed_fits);
return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
@ -4989,7 +5021,7 @@ bool Assembler::IsOrrImmed(Instr instr) {
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1;
uint32_t dummy2;
return FitsShifter(imm32, &dummy1, &dummy2, NULL);
return FitsShifter(imm32, &dummy1, &dummy2, nullptr);
}
@ -5098,8 +5130,8 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!emit_debug_code())) {
return;
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
RelocInfo rinfo(pc_, rmode, data, NULL);
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
RelocInfo rinfo(pc_, rmode, data, nullptr);
reloc_info_writer.Write(&rinfo);
}
@ -5109,7 +5141,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
rmode != RelocInfo::NONE64);
bool sharing_ok = RelocInfo::IsNone(rmode) ||
(rmode >= RelocInfo::FIRST_SHAREABLE_RELOC_MODE);
DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants);
if (pending_32_bit_constants_.empty()) {
first_const_pool_32_use_ = position;
}
@ -5163,7 +5195,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
void Assembler::ConstantPoolAddEntry(int position, Double value) {
DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
DCHECK_LT(pending_64_bit_constants_.size(), kMaxNumPending64Constants);
if (pending_64_bit_constants_.empty()) {
first_const_pool_64_use_ = position;
}
@ -5439,8 +5471,8 @@ UseScratchRegisterScope::~UseScratchRegisterScope() {
}
Register UseScratchRegisterScope::Acquire() {
DCHECK(available_ != nullptr);
DCHECK(*available_ != 0);
DCHECK_NOT_NULL(available_);
DCHECK_NE(*available_, 0);
int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
*available_ &= ~(1UL << index);
return Register::from_code(index);

View File

@ -596,14 +596,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is NULL, the assembler allocates and grows its own
// buffer, and buffer_size determines the initial buffer size. The buffer is
// owned by the assembler and deallocated upon destruction of the assembler.
// If the provided buffer is nullptr, the assembler allocates and grows its
// own buffer, and buffer_size determines the initial buffer size. The buffer
// is owned by the assembler and deallocated upon destruction of the
// assembler.
//
// If the provided buffer is not NULL, the assembler uses the provided buffer
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
// If the provided buffer is not nullptr, the assembler uses the provided
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
@ -1713,7 +1714,7 @@ class Assembler : public AssemblerBase {
void AddrMode5(Instr instr, CRegister crd, const MemOperand& x);
// Labels
void print(Label* L);
void print(const Label* L);
void bind_to(Label* L, int pos);
void next(Label* L);
@ -1724,7 +1725,6 @@ class Assembler : public AssemblerBase {
void ConstantPoolAddEntry(int position, Double value);
friend class RelocInfo;
friend class CodePatcher;
friend class BlockConstPoolScope;
friend class BlockCodeTargetSharingScope;
friend class EnsureSpace;

View File

@ -9,13 +9,11 @@
#include "src/base/bits.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/double.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@ -42,50 +40,30 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label out_of_range, only_low, negate, done;
Register input_reg = source();
Label negate, done;
Register result_reg = destination();
DCHECK(is_truncating());
int double_offset = offset();
// Account for saved regs if input is sp.
if (input_reg == sp) double_offset += 3 * kPointerSize;
Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
Register scratch_low =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
Register scratch_high =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
UseScratchRegisterScope temps(masm);
Register double_low = GetRegisterThatIsNotOneOf(result_reg);
Register double_high = GetRegisterThatIsNotOneOf(result_reg, double_low);
LowDwVfpRegister double_scratch = kScratchDoubleReg;
__ Push(scratch_high, scratch_low, scratch);
// Save the old values from these temporary registers on the stack.
__ Push(double_high, double_low);
if (!skip_fastpath()) {
// Load double input.
__ vldr(double_scratch, MemOperand(input_reg, double_offset));
__ vmov(scratch_low, scratch_high, double_scratch);
// Account for saved regs.
const int kArgumentOffset = 2 * kPointerSize;
// Do fast-path convert from double to int.
__ vcvt_s32_f64(double_scratch.low(), double_scratch);
__ vmov(result_reg, double_scratch.low());
// Load double input.
__ vldr(double_scratch, MemOperand(sp, kArgumentOffset));
__ vmov(double_low, double_high, double_scratch);
// Try to convert with a FPU convert instruction. This handles all
// non-saturating cases.
__ TryInlineTruncateDoubleToI(result_reg, double_scratch, &done);
// If result is not saturated (0x7fffffff or 0x80000000), we are done.
__ sub(scratch, result_reg, Operand(1));
__ cmp(scratch, Operand(0x7ffffffe));
__ b(lt, &done);
} else {
// We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
// know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
if (double_offset == 0) {
__ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
} else {
__ ldr(scratch_low, MemOperand(input_reg, double_offset));
__ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
}
}
__ Ubfx(scratch, scratch_high,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
Register scratch = temps.Acquire();
__ Ubfx(scratch, double_high, HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
// Load scratch with exponent - 1. This is faster than loading
// with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
@ -93,85 +71,64 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// If exponent is greater than or equal to 84, the 32 less significant
// bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
// the result is 0.
// Compare exponent with 84 (compare exponent - 1 with 83).
// Compare exponent with 84 (compare exponent - 1 with 83). If the exponent is
// greater than this, the conversion is out of range, so return zero.
__ cmp(scratch, Operand(83));
__ b(ge, &out_of_range);
__ mov(result_reg, Operand::Zero(), LeaveCC, ge);
__ b(ge, &done);
// If we reach this code, 31 <= exponent <= 83.
// So, we don't have to handle cases where 0 <= exponent <= 20 for
// which we would need to shift right the high part of the mantissa.
// If we reach this code, 30 <= exponent <= 83.
// `TryInlineTruncateDoubleToI` above will have truncated any double with an
// exponent lower than 30.
if (masm->emit_debug_code()) {
// Scratch is exponent - 1.
__ cmp(scratch, Operand(30 - 1));
__ Check(ge, kUnexpectedValue);
}
// We don't have to handle cases where 0 <= exponent <= 20 for which we would
// need to shift right the high part of the mantissa.
// Scratch contains exponent - 1.
// Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
__ rsb(scratch, scratch, Operand(51), SetCC);
__ b(ls, &only_low);
// 21 <= exponent <= 51, shift scratch_low and scratch_high
// 52 <= exponent <= 83, shift only double_low.
// On entry, scratch contains: 52 - exponent.
__ rsb(scratch, scratch, Operand::Zero(), LeaveCC, ls);
__ mov(result_reg, Operand(double_low, LSL, scratch), LeaveCC, ls);
__ b(ls, &negate);
// 21 <= exponent <= 51, shift double_low and double_high
// to generate the result.
__ mov(scratch_low, Operand(scratch_low, LSR, scratch));
__ mov(double_low, Operand(double_low, LSR, scratch));
// Scratch contains: 52 - exponent.
// We need: exponent - 20.
// So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
__ rsb(scratch, scratch, Operand(32));
__ Ubfx(result_reg, scratch_high,
0, HeapNumber::kMantissaBitsInTopWord);
// Set the implicit 1 before the mantissa part in scratch_high.
__ Ubfx(result_reg, double_high, 0, HeapNumber::kMantissaBitsInTopWord);
// Set the implicit 1 before the mantissa part in double_high.
__ orr(result_reg, result_reg,
Operand(1 << HeapNumber::kMantissaBitsInTopWord));
__ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
__ b(&negate);
__ bind(&out_of_range);
__ mov(result_reg, Operand::Zero());
__ b(&done);
__ bind(&only_low);
// 52 <= exponent <= 83, shift only scratch_low.
// On entry, scratch contains: 52 - exponent.
__ rsb(scratch, scratch, Operand::Zero());
__ mov(result_reg, Operand(scratch_low, LSL, scratch));
__ orr(result_reg, double_low, Operand(result_reg, LSL, scratch));
__ bind(&negate);
// If input was positive, scratch_high ASR 31 equals 0 and
// scratch_high LSR 31 equals zero.
// If input was positive, double_high ASR 31 equals 0 and
// double_high LSR 31 equals zero.
// New result = (result eor 0) + 0 = result.
// If the input was negative, we have to negate the result.
// Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
// Input_high ASR 31 equals 0xffffffff and double_high LSR 31 equals 1.
// New result = (result eor 0xffffffff) + 1 = 0 - result.
__ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
__ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
__ eor(result_reg, result_reg, Operand(double_high, ASR, 31));
__ add(result_reg, result_reg, Operand(double_high, LSR, 31));
__ bind(&done);
__ Pop(scratch_high, scratch_low, scratch);
// Restore registers corrupted in this routine and return.
__ Pop(double_high, double_low);
__ Ret();
}
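For reference, a standalone C++ sketch of the arithmetic this stub performs for doubles whose exponent is too large for the inline fast path: extract the exponent, shift the mantissa to obtain the low 32 bits of the truncated value, and negate via the sign bit, mirroring the eor/add sequence above. It illustrates the computation only; it is not the generated assembly.
#include <cstdint>
#include <cstdio>
#include <cstring>
int32_t TruncateDoubleToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  const int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
  if (exponent < 0) return 0;   // |input| < 1 (also covers zero and denormals)
  if (exponent > 83) return 0;  // low 32 bits of the integer part are all zero
  const uint64_t mantissa =
      (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);  // implicit 1
  uint32_t result = exponent <= 52
                        ? static_cast<uint32_t>(mantissa >> (52 - exponent))
                        : static_cast<uint32_t>(mantissa << (exponent - 52));
  if (bits >> 63) result = 0u - result;  // negative input: two's-complement negate
  return static_cast<int32_t>(result);
}
int main() {
  std::printf("%d\n", TruncateDoubleToInt32(-2147483648.0));  // INT32_MIN
  std::printf("%d\n", TruncateDoubleToInt32(4294967296.5));   // low 32 bits are 0
  std::printf("%d\n", TruncateDoubleToInt32(-1.9));           // -1
  return 0;
}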
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
__ stm(db_w, sp, kCallerSaved | lr.bit());
const Register scratch = r1;
if (save_doubles()) {
__ SaveFPRegs(sp, scratch);
}
const int argument_count = 1;
const int fp_argument_count = 0;
AllowExternalCallThatCantCauseGC scope(masm);
__ PrepareCallCFunction(argument_count, fp_argument_count);
__ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()),
argument_count);
if (save_doubles()) {
__ RestoreFPRegs(sp, scratch);
}
__ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0).
}
void MathPowStub::Generate(MacroAssembler* masm) {
const Register exponent = MathPowTaggedDescriptor::exponent();
DCHECK(exponent == r2);
@ -263,14 +220,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ Ret();
}
bool CEntryStub::NeedsImmovableCode() {
return true;
}
Movability CEntryStub::NeedsImmovableCode() { return kImmovable; }
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@ -280,7 +233,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
// Generate if not already in cache.
SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub(isolate, 1, mode).GetCode();
StoreBufferOverflowStub(isolate, mode).GetCode();
}
@ -392,7 +344,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
? no_reg
// Callee-saved register r4 still holds argc.
: r4;
__ LeaveExitFrame(save_doubles(), argc, true);
__ LeaveExitFrame(save_doubles(), argc);
__ mov(pc, lr);
// Handling of exception.
@ -400,10 +352,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_entrypoint_address(
IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@ -437,12 +387,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Compute the handler entry address and jump to it.
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ mov(r1, Operand(pending_handler_code_address));
__ mov(r1, Operand(pending_handler_entrypoint_address));
__ ldr(r1, MemOperand(r1));
__ mov(r2, Operand(pending_handler_offset_address));
__ ldr(r2, MemOperand(r2));
__ add(r1, r1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
__ add(pc, r1, r2);
__ Jump(r1);
}
@ -605,100 +552,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
Register length = scratch1;
// Compare lengths.
Label strings_not_equal, check_zero_length;
__ ldr(length, FieldMemOperand(left, String::kLengthOffset));
__ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ cmp(length, scratch2);
__ b(eq, &check_zero_length);
__ bind(&strings_not_equal);
__ mov(r0, Operand(Smi::FromInt(NOT_EQUAL)));
__ Ret();
// Check if the length is zero.
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
__ cmp(length, Operand::Zero());
__ b(ne, &compare_chars);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ Ret();
// Compare characters.
__ bind(&compare_chars);
GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
&strings_not_equal);
// Characters are equal.
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ Ret();
}
void StringHelper::GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3, Register scratch4) {
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
__ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
__ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ sub(scratch3, scratch1, Operand(scratch2), SetCC);
Register length_delta = scratch3;
__ mov(scratch1, scratch2, LeaveCC, gt);
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
__ cmp(min_length, Operand::Zero());
__ b(eq, &compare_lengths);
// Compare loop.
GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
scratch4, &result_not_equal);
// Compare lengths - strings up to min-length are equal.
__ bind(&compare_lengths);
DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
// Use length_delta as result if it's zero.
__ mov(r0, Operand(length_delta), SetCC);
__ bind(&result_not_equal);
// Conditionally update the result based either on length_delta or
// the last comparison performed in the loop above.
__ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt);
__ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt);
__ Ret();
}
void StringHelper::GenerateOneByteCharsCompareLoop(
MacroAssembler* masm, Register left, Register right, Register length,
Register scratch1, Register scratch2, Label* chars_not_equal) {
// Change index to run from -length to -1 by adding length to string
// start. This means that loop ends when index reaches zero, which
// doesn't need an additional compare.
__ SmiUntag(length);
__ add(scratch1, length,
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
__ add(left, left, Operand(scratch1));
__ add(right, right, Operand(scratch1));
__ rsb(length, length, Operand::Zero());
Register index = length; // index = -length;
// Compare loop.
Label loop;
__ bind(&loop);
__ ldrb(scratch1, MemOperand(left, index));
__ ldrb(scratch2, MemOperand(right, index));
__ cmp(scratch1, scratch2);
__ b(ne, chars_not_equal);
__ add(index, index, Operand(1), SetCC);
__ b(ne, &loop);
}
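For reference, a standalone sketch of the trick in the removed compare loop above: both pointers are advanced to the end of the strings and the index runs from -length up to zero, so the increment's result doubles as the loop-termination test and no separate compare against the length is needed.
#include <cstddef>
#include <cstdint>
bool OneByteCharsEqual(const uint8_t* left, const uint8_t* right, size_t length) {
  left += length;  // point one past the last character
  right += length;
  for (std::ptrdiff_t index = -static_cast<std::ptrdiff_t>(length); index != 0;
       ++index) {
    if (left[index] != right[index]) return false;
  }
  return true;
}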
void DirectCEntryStub::Generate(MacroAssembler* masm) {
// Place the return address on the stack, making the call
// GC safe. The RegExp backend also relies on this.
@ -718,397 +571,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
}
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
Handle<Name> name,
Register scratch0) {
DCHECK(name->IsUniqueName());
// If names of slots in range from 1 to kProbes - 1 for the hash value are
// not equal to the name and kProbes-th slot is not used (its name is the
// undefined value), it guarantees the hash table doesn't contain the
// property. It's true even if some slots represent deleted properties
// (their names are the hole value).
for (int i = 0; i < kInlinedProbes; i++) {
// scratch0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
Register index = scratch0;
// Capacity is smi 2^n.
__ ldr(index, FieldMemOperand(properties, kCapacityOffset));
__ sub(index, index, Operand(1));
__ and_(index, index, Operand(
Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
STATIC_ASSERT(kSmiTagSize == 1);
Register tmp = properties;
__ add(tmp, properties, Operand(index, LSL, 1));
__ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
DCHECK(tmp != entity_name);
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
__ cmp(entity_name, tmp);
__ b(eq, done);
// Load the hole ready for use below:
__ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
// Stop if found the property.
__ cmp(entity_name, Operand(Handle<Name>(name)));
__ b(eq, miss);
Label good;
__ cmp(entity_name, tmp);
__ b(eq, &good);
// Check if the entry name is not a unique name.
__ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ bind(&good);
// Restore the properties.
__ ldr(properties,
FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
}
const int spill_mask =
(lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() |
r2.bit() | r1.bit() | r0.bit());
__ stm(db_w, sp, spill_mask);
__ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
__ mov(r1, Operand(Handle<Name>(name)));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
__ cmp(r0, Operand::Zero());
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
__ b(ne, miss);
}
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
// Registers:
// result: NameDictionary to probe
// r1: key
// dictionary: NameDictionary to probe.
// index: will hold an index of entry if lookup is successful.
// might alias with result_.
// Returns:
// result_ is zero if lookup failed, non zero otherwise.
Register result = r0;
Register dictionary = r0;
Register key = r1;
Register index = r2;
Register mask = r3;
Register hash = r4;
Register undefined = r5;
Register entry_key = r6;
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
__ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset));
__ SmiUntag(mask);
__ sub(mask, mask, Operand(1));
__ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
// Capacity is smi 2^n.
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ add(index, hash, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
} else {
__ mov(index, Operand(hash));
}
__ and_(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ add(index, index, Operand(index, LSL, 1)); // index *= 3.
STATIC_ASSERT(kSmiTagSize == 1);
__ add(index, dictionary, Operand(index, LSL, 2));
__ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
// Having undefined at this place means the name is not contained.
__ cmp(entry_key, Operand(undefined));
__ b(eq, &not_in_dictionary);
// Stop if found the property.
__ cmp(entry_key, Operand(key));
__ b(eq, &in_dictionary);
if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
__ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ ldrb(entry_key,
FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
__ bind(&maybe_in_dictionary);
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup probing failure
// should be treated as lookup failure.
if (mode() == POSITIVE_LOOKUP) {
__ mov(result, Operand::Zero());
__ Ret();
}
__ bind(&in_dictionary);
__ mov(result, Operand(1));
__ Ret();
__ bind(&not_in_dictionary);
__ mov(result, Operand::Zero());
__ Ret();
}
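
For reference, a minimal C++ sketch of the probing scheme the two lookup routines above implement (hypothetical container and helper names, not V8's NameDictionary API; the masked-index formula is the one stated in the comments above):

#include <cstdint>
#include <optional>
#include <string>
#include <vector>

// Sketch only: open-addressed lookup with probe sequence
// index_i = (hash + i + i*i) & mask, capacity being a power of two.
// An empty (undefined) slot proves the key is absent; a slot holding the
// key proves it is present.  The stub caps the probe count
// (kInlinedProbes / kTotalProbes) instead of scanning the whole table and
// treats an inconclusive run separately (maybe_in_dictionary).
std::optional<size_t> Probe(const std::vector<std::optional<std::string>>& slots,
                            uint32_t hash, const std::string& key,
                            int max_probes) {
  const uint32_t mask = static_cast<uint32_t>(slots.size()) - 1;
  for (int i = 0; i < max_probes; ++i) {
    uint32_t index = (hash + static_cast<uint32_t>(i + i * i)) & mask;
    if (!slots[index].has_value()) return std::nullopt;  // definitely absent
    if (*slots[index] == key) return index;              // found
  }
  return std::nullopt;  // inconclusive after max_probes
}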
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
stub1.GetCode();
// Hydrogen code stubs need stub2 at snapshot time.
StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
stub2.GetCode();
}
RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
Instr first_instruction = Assembler::instr_at(stub->instruction_start());
Instr second_instruction =
Assembler::instr_at(stub->instruction_start() + Assembler::kInstrSize);
if (Assembler::IsBranch(first_instruction)) {
return INCREMENTAL;
}
DCHECK(Assembler::IsTstImmediate(first_instruction));
if (Assembler::IsBranch(second_instruction)) {
return INCREMENTAL_COMPACTION;
}
DCHECK(Assembler::IsTstImmediate(second_instruction));
return STORE_BUFFER_ONLY;
}
void RecordWriteStub::Patch(Code* stub, Mode mode) {
MacroAssembler masm(stub->GetIsolate(), stub->instruction_start(),
stub->instruction_size(), CodeObjectRequired::kNo);
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
PatchBranchIntoNop(&masm, 0);
PatchBranchIntoNop(&masm, Assembler::kInstrSize);
break;
case INCREMENTAL:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, 0);
break;
case INCREMENTAL_COMPACTION:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
PatchNopIntoBranch(&masm, Assembler::kInstrSize);
break;
}
DCHECK(GetMode(stub) == mode);
Assembler::FlushICache(stub->GetIsolate(), stub->instruction_start(),
2 * Assembler::kInstrSize);
}
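
As a rough illustration of what GetMode and Patch above encode (a sketch with hypothetical names, not V8 code): the stub's first two instruction slots act as a tiny mode flag, each slot holding either a TST-immediate (which behaves as a nop here) or a branch.

enum class StubMode { kStoreBufferOnly, kIncremental, kIncrementalCompaction };

// Sketch only: mirrors the decoding in GetMode() above.
StubMode DecodeStubMode(bool first_slot_is_branch, bool second_slot_is_branch) {
  if (first_slot_is_branch) return StubMode::kIncremental;
  if (second_slot_is_branch) return StubMode::kIncrementalCompaction;
  return StubMode::kStoreBufferOnly;  // both slots still hold TST (nop)
}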
// Takes the input in 3 registers: address_ value_ and object_. A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed. The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
Label skip_to_incremental_noncompacting;
Label skip_to_incremental_compacting;
// The first two instructions are generated with labels so as to get the
// offset fixed up correctly by the bind(Label*) call. We patch it back and
// forth between a compare instruction (a nop in this position) and the
// real branch when we start and stop incremental heap marking.
// See RecordWriteStub::Patch for details.
{
// Block literal pool emission, as the position of these two instructions
// is assumed by the patching code.
Assembler::BlockConstPoolScope block_const_pool(masm);
__ b(&skip_to_incremental_noncompacting);
__ b(&skip_to_incremental_compacting);
}
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
__ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
}
__ Ret();
__ bind(&skip_to_incremental_noncompacting);
GenerateIncremental(masm, INCREMENTAL);
__ bind(&skip_to_incremental_compacting);
GenerateIncremental(masm, INCREMENTAL_COMPACTION);
// Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
// Will be checked in IncrementalMarking::ActivateGeneratedStub.
DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
PatchBranchIntoNop(masm, 0);
PatchBranchIntoNop(masm, Assembler::kInstrSize);
}
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
regs_.Save(masm);
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
regs_.scratch0(),
&dont_need_remembered_set);
__ JumpIfInNewSpace(regs_.object(), regs_.scratch0(),
&dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
__ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
__ bind(&dont_need_remembered_set);
}
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm);
__ Ret();
}
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
int argument_count = 3;
__ PrepareCallCFunction(argument_count);
Register address = r0 == regs_.address() ? regs_.scratch0() : regs_.address();
DCHECK(address != regs_.object());
DCHECK(address != r0);
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
__ Move(r1, address);
__ mov(r2, Operand(ExternalReference::isolate_address(isolate())));
AllowExternalCallThatCantCauseGC scope(masm);
__ CallCFunction(
ExternalReference::incremental_marking_record_write_function(isolate()),
argument_count);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
void RecordWriteStub::Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
Label need_incremental;
Label need_incremental_pop_scratch;
#ifndef V8_CONCURRENT_MARKING
Label on_black;
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
__ bind(&on_black);
#endif
// Get the value from the slot.
__ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
__ CheckPageFlag(regs_.scratch0(), // Contains value.
regs_.scratch1(), // Scratch.
MemoryChunk::kEvacuationCandidateMask,
eq,
&ensure_not_white);
__ CheckPageFlag(regs_.object(),
regs_.scratch1(), // Scratch.
MemoryChunk::kSkipEvacuationSlotsRecordingMask,
eq,
&need_incremental);
__ bind(&ensure_not_white);
}
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.object(), regs_.address());
__ JumpIfWhite(regs_.scratch0(), // The value.
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm);
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode());
} else {
__ Ret();
}
__ bind(&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
__ bind(&need_incremental);
// Fall through when we need to inform the incremental marker.
}
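
A condensed view of the decision this helper makes, as a hedged C++ sketch (hypothetical predicate names; the page-flag and mark-bit checks are the ones emitted above, and the non-black early exit only applies without V8_CONCURRENT_MARKING):

// Sketch only: returns true when the incremental marker must be informed.
bool MustInformIncrementalMarker(bool object_is_black, bool value_is_white,
                                 bool value_on_evacuation_candidate,
                                 bool object_skips_slot_recording,
                                 bool compacting) {
  if (!object_is_black) return false;  // nothing to do (non-concurrent case)
  if (compacting && value_on_evacuation_candidate &&
      !object_skips_slot_recording) {
    return true;  // slot must be recorded for compaction
  }
  return value_is_white;  // white values still need to be marked
}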
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (tasm->isolate()->function_entry_hook() != NULL) {
if (tasm->isolate()->function_entry_hook() != nullptr) {
tasm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(tasm);
predictable.ExpectSize(tasm->CallStubSize() + 2 * Assembler::kInstrSize);
@ -1119,7 +584,7 @@ void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
masm->MaybeCheckConstPool();
PredictableCodeSizeScope predictable(masm);
@ -1147,7 +612,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// We also save lr, so the count here is one higher than the mask indicates.
const int32_t kNumSavedRegs = 7;
DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
DCHECK_EQ(kCallerSaved & kSavedRegs, kCallerSaved);
// Save all caller-save registers as this may be called from anywhere.
__ stm(db_w, sp, kSavedRegs | lr.bit());
@ -1357,7 +822,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
// Will both indicate a nullptr and a Smi.
__ tst(r4, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r4, r4, r5, MAP_TYPE);
@ -1437,7 +902,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
// Will both indicate a nullptr and a Smi.
__ tst(r3, Operand(kSmiTagMask));
__ Assert(ne, kUnexpectedInitialMapForArrayFunction);
__ CompareObjectType(r3, r3, r4, MAP_TYPE);
@ -1485,8 +950,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
ExternalReference thunk_ref,
int stack_space,
MemOperand* stack_space_operand,
MemOperand return_value_operand,
MemOperand* context_restore_operand) {
MemOperand return_value_operand) {
Isolate* isolate = masm->isolate();
ExternalReference next_address =
ExternalReference::handle_scope_next_address(isolate);
@ -1571,17 +1035,13 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Leave the API exit frame.
__ bind(&leave_exit_frame);
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
__ ldr(cp, *context_restore_operand);
}
// LeaveExitFrame expects unwind space to be in a register.
if (stack_space_operand != NULL) {
if (stack_space_operand != nullptr) {
__ ldr(r4, *stack_space_operand);
} else {
__ mov(r4, Operand(stack_space));
}
__ LeaveExitFrame(false, r4, !restore_context, stack_space_operand != NULL);
__ LeaveExitFrame(false, r4, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
@ -1610,7 +1070,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : callee
// -- r4 : call_data
// -- r2 : holder
// -- r1 : api_function_address
@ -1620,21 +1079,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1) * 4] : first argument
// -- sp[argc * 4] : receiver
// -- sp[(argc + 1) * 4] : accessor_holder
// -----------------------------------
Register callee = r0;
Register call_data = r4;
Register holder = r2;
Register api_function_address = r1;
Register context = cp;
typedef FunctionCallbackArguments FCA;
STATIC_ASSERT(FCA::kArgsLength == 8);
STATIC_ASSERT(FCA::kNewTargetIndex == 7);
STATIC_ASSERT(FCA::kContextSaveIndex == 6);
STATIC_ASSERT(FCA::kCalleeIndex == 5);
STATIC_ASSERT(FCA::kArgsLength == 6);
STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@ -1644,12 +1098,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
// context save
__ push(context);
// callee
__ push(callee);
// call data
__ push(call_data);
@ -1667,37 +1115,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// holder
__ push(holder);
// enter a new context
if (is_lazy()) {
// ----------- S t a t e -------------------------------------
// -- sp[0] : holder
// -- ...
// -- sp[(FCA::kArgsLength - 1) * 4] : new_target
// -- sp[FCA::kArgsLength * 4] : last argument
// -- ...
// -- sp[(FCA::kArgsLength + argc - 1) * 4] : first argument
// -- sp[(FCA::kArgsLength + argc) * 4] : receiver
// -- sp[(FCA::kArgsLength + argc + 1) * 4] : accessor_holder
// -----------------------------------------------------------
// load context from accessor_holder
Register accessor_holder = context;
__ ldr(accessor_holder,
MemOperand(sp, (FCA::kArgsLength + 1 + argc()) * kPointerSize));
// Look for the constructor if |accessor_holder| is not a function.
Label skip_looking_for_constructor;
__ ldr(scratch0, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
__ tst(scratch1, Operand(1 << Map::kIsConstructor));
__ b(ne, &skip_looking_for_constructor);
__ GetMapConstructor(context, scratch0, scratch0, scratch1);
__ bind(&skip_looking_for_constructor);
__ ldr(context, FieldMemOperand(context, JSFunction::kContextOffset));
} else {
// load context from callee
__ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
// Prepare arguments.
__ mov(scratch0, sp);
@ -1726,22 +1143,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
int return_value_offset = 2 + FCA::kReturnValueOffset;
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
const int stack_space = argc() + FCA::kArgsLength + 2;
const int stack_space = argc() + FCA::kArgsLength + 1;
MemOperand* stack_space_operand = nullptr;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
stack_space_operand, return_value_operand,
&context_restore_operand);
stack_space_operand, return_value_operand);
}
@ -1803,7 +1212,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
MemOperand return_value_operand(
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, NULL, return_value_operand, NULL);
kStackUnwindSpace, nullptr, return_value_operand);
}
#undef __
@ -8,184 +8,6 @@
namespace v8 {
namespace internal {
class StringHelper : public AllStatic {
public:
// Compares two flat one-byte strings and returns result in r0.
static void GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3, Register scratch4);
// Compares two flat one-byte strings for equality and returns result in r0.
static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left, Register right,
Register scratch1,
Register scratch2,
Register scratch3);
private:
static void GenerateOneByteCharsCompareLoop(
MacroAssembler* masm, Register left, Register right, Register length,
Register scratch1, Register scratch2, Label* chars_not_equal);
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Isolate* isolate,
Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
minor_key_ = ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(remembered_set_action) |
SaveFPRegsModeBits::encode(fp_mode);
}
RecordWriteStub(uint32_t key, Isolate* isolate)
: PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
bool SometimesSetsUpAFrame() override { return false; }
static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
DCHECK(Assembler::IsTstImmediate(masm->instr_at(pos)));
}
static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
DCHECK(Assembler::IsBranch(masm->instr_at(pos)));
}
static Mode GetMode(Code* stub);
static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
private:
// This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by
// the caller.
class RegisterAllocation {
public:
RegisterAllocation(Register object, Register address, Register scratch0)
: object_(object),
address_(address),
scratch0_(scratch0),
scratch1_(no_reg) {
DCHECK(!AreAliased(scratch0, object, address, no_reg));
scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
}
void Save(MacroAssembler* masm) {
DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->push(scratch1_);
}
void Restore(MacroAssembler* masm) {
masm->pop(scratch1_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved. The scratch registers
// will be restored by other means so we don't bother pushing them here.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
masm->SaveFPRegs(sp, scratch0_);
}
}
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
masm->RestoreFPRegs(sp, scratch0_);
}
masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
}
inline Register object() { return object_; }
inline Register address() { return address_; }
inline Register scratch0() { return scratch0_; }
inline Register scratch1() { return scratch1_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
inline Major MajorKey() const final { return RecordWrite; }
void Generate(MacroAssembler* masm) override;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
}
Register value() const {
return Register::from_code(ValueBits::decode(minor_key_));
}
Register address() const {
return Register::from_code(AddressBits::decode(minor_key_));
}
RememberedSetAction remembered_set_action() const {
return RememberedSetActionBits::decode(minor_key_);
}
SaveFPRegsMode save_fp_regs_mode() const {
return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 4> {};
class ValueBits: public BitField<int, 4, 4> {};
class AddressBits: public BitField<int, 8, 4> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
Label slow_;
RegisterAllocation regs_;
DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
};
// Trampoline stub to call into native code. To call safely into native code
// in the presence of compacting GC (which can move code objects) we need to
keep the code which called into native code pinned in memory. Currently the
@ -197,52 +19,12 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
bool NeedsImmovableCode() override { return true; }
Movability NeedsImmovableCode() override { return kImmovable; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
: PlatformCodeStub(isolate) {
minor_key_ = LookupModeBits::encode(mode);
}
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
Handle<Name> name,
Register scratch0);
bool SometimesSetsUpAFrame() override { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
static const int kCapacityOffset =
NameDictionary::kHeaderSize +
NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
} // namespace internal
} // namespace v8
@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/arm/codegen-arm.h"
#if V8_TARGET_ARCH_ARM
#include <memory>
@ -16,21 +14,21 @@
namespace v8 {
namespace internal {
#define __ masm.
#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
size_t actual_size;
size_t allocated = 0;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
Register dest = r0;
@ -171,8 +169,9 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
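
The change above keeps the same overall pattern: allocate a writable page, assemble code into it, flush the instruction cache, then make the page read+execute before casting it to a function pointer. Below is a stand-alone POSIX sketch of that pattern (plain mmap/mprotect plus the GCC/Clang cache-flush builtin, not V8's OS abstractions):

#include <cstddef>
#include <cstring>
#include <sys/mman.h>

// Sketch only: map RW, copy/emit code, flush icache, flip to RX.
void* MakeExecutableCopy(const void* code, size_t code_size, size_t page_size) {
  void* page = mmap(nullptr, page_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) return nullptr;
  std::memcpy(page, code, code_size);  // stand-in for MacroAssembler emission
  __builtin___clear_cache(static_cast<char*>(page),
                          static_cast<char*>(page) + code_size);
  if (mprotect(page, page_size, PROT_READ | PROT_EXEC) != 0) {  // W^X flip
    munmap(page, page_size);
    return nullptr;
  }
  return page;
}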
@ -184,12 +183,12 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
#if defined(USE_SIMULATOR)
return stub;
#else
size_t actual_size;
size_t allocated = 0;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
Register dest = r0;
@ -261,9 +260,9 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
CodeDesc desc;
masm.GetCode(isolate, &desc);
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
@ -273,12 +272,12 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
#else
size_t actual_size;
size_t allocated = 0;
byte* buffer =
static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
AllocateSystemPage(isolate->heap()->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
__ MovFromFloatParameter(d0);
@ -290,114 +289,15 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
masm.GetCode(isolate, &desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
Assembler::FlushICache(isolate, buffer, actual_size);
base::OS::ProtectCode(buffer, actual_size);
Assembler::FlushICache(isolate, buffer, allocated);
CHECK(base::OS::SetPermissions(buffer, allocated,
base::OS::MemoryPermission::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
#undef __
// -------------------------------------------------------------------------
// Code generators
#define __ ACCESS_MASM(masm)
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime) {
Label indirect_string_loaded;
__ bind(&indirect_string_loaded);
// Fetch the instance type of the receiver into result register.
__ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
__ tst(result, Operand(kIsIndirectStringMask));
__ b(eq, &check_sequential);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string, thin_string;
__ and_(result, result, Operand(kStringRepresentationMask));
__ cmp(result, Operand(kConsStringTag));
__ b(eq, &cons_string);
__ cmp(result, Operand(kThinStringTag));
__ b(eq, &thin_string);
// Handle slices.
__ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ add(index, index, Operand::SmiUntag(result));
__ jmp(&indirect_string_loaded);
// Handle thin strings.
__ bind(&thin_string);
__ ldr(string, FieldMemOperand(string, ThinString::kActualOffset));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ bind(&cons_string);
__ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ CompareRoot(result, Heap::kempty_stringRootIndex);
__ b(ne, call_runtime);
// Get the first of the two strings and load its instance type.
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ jmp(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
// reduced to the underlying sequential or external string).
Label external_string, check_encoding;
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(result, Operand(kStringRepresentationMask));
__ b(ne, &external_string);
// Prepare sequential strings
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ add(string,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ jmp(&check_encoding);
// Handle external strings.
__ bind(&external_string);
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ tst(result, Operand(kIsIndirectStringMask));
__ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_ASSERT(kShortExternalStringTag != 0);
__ tst(result, Operand(kShortExternalStringMask));
__ b(ne, call_runtime);
__ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label one_byte, done;
__ bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ tst(result, Operand(kStringEncodingMask));
__ b(ne, &one_byte);
// Two-byte string.
__ ldrh(result, MemOperand(string, index, LSL, 1));
__ jmp(&done);
__ bind(&one_byte);
// One-byte string.
__ ldrb(result, MemOperand(string, index));
__ bind(&done);
}
#undef __
} // namespace internal
} // namespace v8
@ -1,33 +0,0 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
// indexed character into |result|. We expect |index| as untagged input and
// |result| as untagged output.
static void Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime);
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
} // namespace internal
} // namespace v8
#endif // V8_ARM_CODEGEN_ARM_H_
@ -10,7 +10,7 @@
namespace v8 {
namespace internal {
double Instruction::DoubleImmedVmov() const {
Float64 Instruction::DoubleImmedVmov() const {
// Reconstruct a double from the immediate encoded in the vmov instruction.
//
// instruction: [xxxxxxxx,xxxxabcd,xxxxxxxx,xxxxefgh]
@ -25,9 +25,7 @@ double Instruction::DoubleImmedVmov() const {
high16 |= Bit(19) << 15; // axxxxxxx,xxxxxxxx.
uint64_t imm = high16 << 48;
double d;
memcpy(&d, &imm, 8);
return d;
return Float64::FromBits(imm);
}
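
For clarity, a stand-alone sketch of the same bit reconstruction (assuming the layout spelled out in the comment above: sign = a, exponent = NOT(b) followed by eight copies of b and then cd, fraction = efgh padded with 48 zero bits):

#include <cstdint>
#include <cstring>

// Sketch only: expand the 8 immediate bits abcdefgh of a vmov.f64 into the
// double they encode, building the top 16 bits exactly as described above.
double ExpandVmovImmediate(uint8_t imm8) {
  uint64_t a = (imm8 >> 7) & 1;
  uint64_t b = (imm8 >> 6) & 1;
  uint64_t cd = (imm8 >> 4) & 3;
  uint64_t efgh = imm8 & 0xF;
  uint64_t high16 = (a << 15) | ((b ^ 1) << 14) |  // a, NOT(b)
                    (b ? (0xFFull << 6) : 0) |     // b replicated 8 times
                    (cd << 4) | efgh;              // cd, efgh
  uint64_t bits = high16 << 48;                    // low 48 bits are zero
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;  // e.g. imm8 = 0x70 yields 1.0
}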
@ -41,15 +39,8 @@ const char* Registers::names_[kNumRegisters] = {
// List of alias names which can be used when referring to ARM registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{10, "sl"},
{11, "r11"},
{12, "r12"},
{13, "r13"},
{14, "r14"},
{15, "r15"},
{kNoRegister, NULL}
};
{10, "sl"}, {11, "r11"}, {12, "r12"}, {13, "r13"},
{14, "r14"}, {15, "r15"}, {kNoRegister, nullptr}};
// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31.
// Note that "sN:sM" is the same as "dN/2" up to d15.
@ -9,6 +9,7 @@
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/boxed-float.h"
#include "src/globals.h"
// ARM EABI is required.
@ -29,7 +30,7 @@ inline int EncodeConstantPoolLength(int length) {
return ((length & 0xfff0) << 4) | (length & 0xf);
}
inline int DecodeConstantPoolLength(int instr) {
DCHECK((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
DCHECK_EQ(instr & kConstantPoolMarkerMask, kConstantPoolMarker);
return ((instr >> 4) & 0xfff0) | (instr & 0xf);
}
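
A small, self-contained round-trip check of the pair above (the helpers are duplicated here only so the sketch compiles on its own):

#include <cassert>

// Sketch only: EncodeConstantPoolLength and DecodeConstantPoolLength are
// inverses for every length that fits the split 16-bit encoding.
inline int EncodeLen(int length) { return ((length & 0xfff0) << 4) | (length & 0xf); }
inline int DecodeLen(int instr) { return ((instr >> 4) & 0xfff0) | (instr & 0xf); }

int main() {
  assert(EncodeLen(0x123) == 0x1203);
  assert(DecodeLen(0x1203) == 0x123);
  for (int len = 0; len < 0x10000; ++len) assert(DecodeLen(EncodeLen(len)) == len);
  return 0;
}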
@ -662,7 +663,7 @@ class Instruction {
inline bool HasLink() const { return LinkValue() == 1; }
// Decode the double immediate from a vmov instruction.
double DoubleImmedVmov() const;
Float64 DoubleImmedVmov() const;
// Instructions are read out of a code stream. The only way to get a
// reference to an instruction is to convert a pointer. There is no way
@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/assembler-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
@ -31,8 +30,8 @@ void Deoptimizer::TableEntryGenerator::Generate() {
const int kFloatRegsSize = kFloatSize * SwVfpRegister::kNumRegisters;
// Save all allocatable VFP registers before messing with them.
DCHECK(kDoubleRegZero.code() == 13);
DCHECK(kScratchDoubleReg.code() == 14);
DCHECK_EQ(kDoubleRegZero.code(), 13);
DCHECK_EQ(kScratchDoubleReg.code(), 14);
{
// We use a run-time check for VFP32DREGS.
@ -107,7 +106,7 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
// Copy core registers into FrameDescription::registers_[kNumRegisters].
DCHECK(Register::kNumRegisters == kNumberOfRegisters);
DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
for (int i = 0; i < kNumberOfRegisters; i++) {
int offset = (i * kPointerSize) + FrameDescription::registers_offset();
__ ldr(r2, MemOperand(sp, i * kPointerSize));
@ -301,7 +301,7 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
// Handle all register based formatting in this function to reduce the
// complexity of FormatOption.
int Decoder::FormatRegister(Instruction* instr, const char* format) {
DCHECK(format[0] == 'r');
DCHECK_EQ(format[0], 'r');
if (format[1] == 'n') { // 'rn: Rn register
int reg = instr->RnValue();
PrintRegister(reg);
@ -468,7 +468,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 4;
}
case 'd': { // 'd: vmov double immediate.
double d = instr->DoubleImmedVmov();
double d = instr->DoubleImmedVmov().get_scalar();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "#%g", d);
return 1;
}
@ -479,9 +479,9 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
// BFC/BFI:
// Bits 20-16 represent the most-significant bit. Convert to width.
width -= lsbit;
DCHECK(width > 0);
DCHECK_GT(width, 0);
}
DCHECK((width + lsbit) <= 32);
DCHECK_LE(width + lsbit, 32);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"#%d, #%d", lsbit, width);
return 1;
@ -501,7 +501,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
DCHECK((width >= 1) && (width <= 32));
DCHECK((lsb >= 0) && (lsb <= 31));
DCHECK((width + lsb) <= 32);
DCHECK_LE(width + lsb, 32);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",
@ -583,7 +583,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
if (instr->TypeValue() == 0) {
PrintShiftRm(instr);
} else {
DCHECK(instr->TypeValue() == 1);
DCHECK_EQ(instr->TypeValue(), 1);
PrintShiftImm(instr);
}
return 8;
@ -24,6 +24,11 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
USE(register_count);
return 0;
}
} // namespace internal
} // namespace v8
@ -58,9 +58,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return r4; }
const Register StoreTransitionDescriptor::VectorRegister() { return r3; }
const Register StoreTransitionDescriptor::MapRegister() { return r5; }
const Register StringCompareDescriptor::LeftRegister() { return r1; }
const Register StringCompareDescriptor::RightRegister() { return r0; }
const Register ApiGetterDescriptor::HolderRegister() { return r0; }
const Register ApiGetterDescriptor::CallbackRegister() { return r3; }
@ -217,7 +214,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {r1, r3, r0, r2};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@ -227,7 +224,7 @@ void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
@ -237,7 +234,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {r1, r2, r0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
@ -287,10 +284,10 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
r0, // callee
r4, // call_data
r2, // holder
r1, // api_function_address
JavaScriptFrame::context_register(), // callee context
r4, // call_data
r2, // holder
r1, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
@ -340,8 +337,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // the value to pass to the generator
r1, // the JSGeneratorObject to resume
r2 // the resume mode (tagged)
r1 // the JSGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@ -12,7 +12,7 @@
#include "src/base/utils/random-number-generator.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/codegen.h"
#include "src/code-stubs.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/double.h"
@ -352,8 +352,8 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
if (CpuFeatures::IsSupported(NEON)) {
vswp(srcdst0, srcdst1);
} else {
DCHECK(srcdst0 != kScratchDoubleReg);
DCHECK(srcdst1 != kScratchDoubleReg);
DCHECK_NE(srcdst0, kScratchDoubleReg);
DCHECK_NE(srcdst1, kScratchDoubleReg);
vmov(kScratchDoubleReg, srcdst0);
vmov(srcdst0, srcdst1);
vmov(srcdst1, kScratchDoubleReg);
@ -401,7 +401,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
DCHECK(lsb < 32);
DCHECK_LT(lsb, 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
@ -417,7 +417,7 @@ void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
DCHECK(lsb < 32);
DCHECK_LT(lsb, 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
@ -438,7 +438,7 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width,
Condition cond) {
DCHECK(lsb < 32);
DCHECK_LT(lsb, 32);
if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, src, Operand(mask));
@ -490,14 +490,6 @@ void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
}
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
Label* branch) {
DCHECK(cond == eq || cond == ne);
CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
void MacroAssembler::RecordWriteField(Register object, int offset,
Register value, Register dst,
LinkRegisterStatus lr_status,
@ -540,7 +532,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
DCHECK(NumRegs(registers) > 0);
DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@ -552,7 +544,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
DCHECK(NumRegs(registers) > 0);
DCHECK_GT(NumRegs(registers), 0);
RegList regs = 0;
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@ -645,13 +637,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (lr_status == kLRHasNotBeenSaved) {
push(lr);
}
#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
#endif
if (lr_status == kLRHasNotBeenSaved) {
pop(lr);
}
@ -674,39 +660,6 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address, Register scratch,
SaveFPRegsMode fp_mode) {
Label done;
if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, scratch, &ok);
stop("Remembered set pointer is in new space");
bind(&ok);
}
// Load store buffer top.
{
UseScratchRegisterScope temps(this);
Register store_buffer = temps.Acquire();
mov(store_buffer, Operand(ExternalReference::store_buffer_top(isolate())));
ldr(scratch, MemOperand(store_buffer));
// Store pointer to buffer and increment buffer top.
str(address, MemOperand(scratch, kPointerSize, PostIndex));
// Write back new top of buffer.
str(scratch, MemOperand(store_buffer));
}
// Call stub on end of buffer.
// Check for end of buffer.
tst(scratch, Operand(StoreBuffer::kStoreBufferMask));
Ret(ne);
push(lr);
StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
CallStub(&store_buffer_overflow);
pop(lr);
bind(&done);
Ret();
}
void TurboAssembler::PushCommonFrame(Register marker_reg) {
if (marker_reg.is_valid()) {
if (marker_reg.code() > fp.code()) {
@ -736,11 +689,11 @@ void TurboAssembler::PushStandardFrame(Register function_reg) {
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of contiguous register values starting with r0.
DCHECK(kSafepointSavedRegisters == (1 << kNumSafepointSavedRegisters) - 1);
DCHECK_EQ(kSafepointSavedRegisters, (1 << kNumSafepointSavedRegisters) - 1);
// Safepoints expect a block of kNumSafepointRegisters values on the
// stack, so adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
DCHECK(num_unsaved >= 0);
DCHECK_GE(num_unsaved, 0);
sub(sp, sp, Operand(num_unsaved * kPointerSize));
stm(db_w, sp, kSafepointSavedRegisters);
}
@ -1267,7 +1220,6 @@ int TurboAssembler::ActivationFrameAlignment() {
void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
bool restore_context,
bool argument_count_is_length) {
ConstantPoolUnavailableScope constant_pool_unavailable(this);
UseScratchRegisterScope temps(this);
@ -1288,11 +1240,9 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
str(r3, MemOperand(scratch));
// Restore current context from top and clear it in debug mode.
if (restore_context) {
mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
isolate())));
ldr(cp, MemOperand(scratch));
}
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
ldr(cp, MemOperand(scratch));
#ifdef DEBUG
mov(scratch,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
@ -1630,7 +1580,7 @@ void MacroAssembler::CompareObjectType(Register object,
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
InstanceType type) {
ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
cmp(type_reg, Operand(type));
}
@ -1644,31 +1594,6 @@ void MacroAssembler::CompareRoot(Register obj,
cmp(obj, scratch);
}
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
mov(value, Operand(cell));
ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
}
void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
Label* miss) {
GetWeakValue(value, cell);
JumpIfSmi(value, miss);
}
void MacroAssembler::GetMapConstructor(Register result, Register map,
Register temp, Register temp2) {
Label done, loop;
ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
bind(&loop);
JumpIfSmi(result, &done);
CompareObjectType(result, temp, temp2, MAP_TYPE);
b(ne, &done);
ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
b(&loop);
bind(&done);
}
void MacroAssembler::CallStub(CodeStub* stub,
Condition cond) {
DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
@ -1715,20 +1640,6 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame() || !stub->SometimesSetsUpAFrame();
}
void MacroAssembler::SmiToDouble(LowDwVfpRegister value, Register smi) {
if (CpuFeatures::IsSupported(VFPv3)) {
CpuFeatureScope scope(this, VFPv3);
vmov(value.low(), smi);
vcvt_f64_s32(value, 1);
} else {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
SmiUntag(scratch, smi);
vmov(value.low(), scratch);
vcvt_f64_s32(value, value.low());
}
}
void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
LowDwVfpRegister double_scratch) {
@ -1766,7 +1677,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
sub(sp, sp, Operand(kDoubleSize)); // Put input on stack.
vstr(double_input, MemOperand(sp, 0));
CallStubDelayed(new (zone) DoubleToIStub(nullptr, sp, result, 0, true, true));
CallStubDelayed(new (zone) DoubleToIStub(nullptr, result));
add(sp, sp, Operand(kDoubleSize));
pop(lr);
@ -1823,7 +1734,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
bool builtin_exit_frame) {
#if defined(__thumb__)
// Thumb mode builtin.
DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
DCHECK_EQ(reinterpret_cast<intptr_t>(builtin.address()) & 1, 1);
#endif
mov(r1, Operand(builtin));
CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
@ -1833,7 +1744,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value > 0);
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference(counter)));
ldr(scratch1, MemOperand(scratch2));
@ -1845,7 +1756,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value > 0);
DCHECK_GT(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
mov(scratch2, Operand(ExternalReference(counter)));
ldr(scratch1, MemOperand(scratch2));
@ -1872,7 +1783,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
bind(&abort_start);
#ifdef DEBUG
const char* msg = GetBailoutReason(reason);
if (msg != NULL) {
if (msg != nullptr) {
RecordComment("Abort message: ");
RecordComment(msg);
}
@ -1901,7 +1812,7 @@ void TurboAssembler::Abort(BailoutReason reason) {
// of the Abort macro constant.
static const int kExpectedAbortInstructions = 7;
int abort_instructions = InstructionsGeneratedSince(&abort_start);
DCHECK(abort_instructions <= kExpectedAbortInstructions);
DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
while (abort_instructions++ < kExpectedAbortInstructions) {
nop();
}
@ -2052,18 +1963,6 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
}
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
Label succeed;
tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
b(eq, &succeed);
cmp(reg, Operand(SYMBOL_TYPE));
b(ne, not_unique_name);
bind(&succeed);
}
void TurboAssembler::CheckFor32DRegs(Register scratch) {
mov(scratch, Operand(ExternalReference::cpu_features()));
ldr(scratch, MemOperand(scratch));
@ -2362,100 +2261,6 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
b(cc, condition_met);
}
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
}
void MacroAssembler::HasColor(Register object,
Register bitmap_scratch,
Register mask_scratch,
Label* has_color,
int first_bit,
int second_bit) {
DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
GetMarkBits(object, bitmap_scratch, mask_scratch);
Label other_color, word_boundary;
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
ldr(scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(scratch, Operand(mask_scratch));
b(first_bit == 1 ? eq : ne, &other_color);
// Shift left 1 by adding.
add(mask_scratch, mask_scratch, Operand(mask_scratch), SetCC);
b(eq, &word_boundary);
tst(scratch, Operand(mask_scratch));
b(second_bit == 1 ? ne : eq, has_color);
jmp(&other_color);
bind(&word_boundary);
ldr(scratch,
MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
tst(scratch, Operand(1));
b(second_bit == 1 ? ne : eq, has_color);
bind(&other_color);
}
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
Ubfx(scratch, addr_reg, kLowBits, kPageSizeBits - kLowBits);
add(bitmap_reg, bitmap_reg, Operand(scratch, LSL, kPointerSizeLog2));
mov(scratch, Operand(1));
mov(mask_reg, Operand(scratch, LSL, mask_reg));
}
void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
Register mask_scratch, Register load_scratch,
Label* value_is_white) {
DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
tst(mask_scratch, load_scratch);
b(eq, value_is_white);
}
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}
void MacroAssembler::LoadAccessor(Register dst, Register holder,
int accessor_index,
AccessorComponent accessor) {
ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
LoadInstanceDescriptors(dst, dst);
ldr(dst,
FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
: AccessorPair::kSetterOffset;
ldr(dst, FieldMemOperand(dst, offset));
}
Register GetRegisterThatIsNotOneOf(Register reg1,
Register reg2,
Register reg3,
@ -2508,51 +2313,6 @@ bool AreAliased(Register reg1,
}
#endif
CodePatcher::CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache)
: address_(address),
size_(instructions * Assembler::kInstrSize),
masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
flush_cache_(flush_cache) {
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
if (flush_cache_ == FLUSH) {
Assembler::FlushICache(masm_.isolate(), address_, size_);
}
// Check that we don't have any pending constant pools.
DCHECK(masm_.pending_32_bit_constants_.empty());
DCHECK(masm_.pending_64_bit_constants_.empty());
// Check that the code was patched as expected.
DCHECK(masm_.pc_ == address_ + size_);
DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
void CodePatcher::Emit(Instr instr) {
masm()->emit(instr);
}
void CodePatcher::Emit(Address addr) {
masm()->emit(reinterpret_cast<Instr>(addr));
}
void CodePatcher::EmitCondition(Condition cond) {
Instr instr = Assembler::instr_at(masm_.pc_);
instr = (instr & ~kCondMask) | cond;
masm_.emit(instr);
}
} // namespace internal
} // namespace v8
@ -583,18 +583,6 @@ class MacroAssembler : public TurboAssembler {
void CallDeoptimizer(Address target);
static int CallDeoptimizerSize();
// Emit code that loads |parameter_index|'th parameter from the stack to
// the register according to the CallInterfaceDescriptor definition.
// |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
// below the caller's sp.
template <class Descriptor>
void LoadParameterFromStack(
Register reg, typename Descriptor::ParameterIndices parameter_index,
int sp_to_ra_offset_in_words = 0) {
DCHECK(Descriptor::kPassLastArgsOnStack);
UNIMPLEMENTED();
}
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg,
@ -680,7 +668,6 @@ class MacroAssembler : public TurboAssembler {
// Expect the number of values, pushed prior to the exit frame, to
// remove in a register (or no_reg, if there is nothing to remove).
void LeaveExitFrame(bool save_doubles, Register argument_count,
bool restore_context,
bool argument_count_is_length = false);
// Load the global proxy from the current context.
@ -730,11 +717,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Support functions.
// Machine code version of Map::GetConstructor().
// |temp| holds |result|'s map when done, and |temp2| its instance type.
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@ -755,12 +737,6 @@ class MacroAssembler : public TurboAssembler {
Register type_reg,
InstanceType type);
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Compare the object in a register to a value from the root list.
// Acquires a scratch register.
void CompareRoot(Register obj, Heap::RootListIndex index);
@ -784,10 +760,6 @@ class MacroAssembler : public TurboAssembler {
b(ne, if_not_equal);
}
// Load the value of a smi object into a double register.
// The register value must be between d0 and d15.
void SmiToDouble(LowDwVfpRegister value, Register smi);
// Try to convert a double to a signed 32-bit integer.
// Z flag set to one and result assigned if the conversion is exact.
void TryDoubleToInt32Exact(Register result,
@ -876,15 +848,6 @@ class MacroAssembler : public TurboAssembler {
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
// ---------------------------------------------------------------------------
// String utilities
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void LoadInstanceDescriptors(Register map, Register descriptors);
void LoadAccessor(Register dst, Register holder, int accessor_index,
AccessorComponent accessor);
template<typename Field>
void DecodeField(Register dst, Register src) {
Ubfx(dst, src, Field::kShift, Field::kSize);
@ -907,13 +870,6 @@ class MacroAssembler : public TurboAssembler {
Condition cond, // eq for new space, ne otherwise.
Label* branch);
// Helper for finding the mark bits for an address. Afterwards, the
// bitmap register points at the word with the mark bits and the mask
// the position of the first bit. Leaves addr_reg unchanged.
inline void GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
@ -922,43 +878,6 @@ class MacroAssembler : public TurboAssembler {
friend class StandardFrame;
};
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to emit
// relocation information. If any of these constraints are violated it causes
// an assertion to fail.
class CodePatcher {
public:
enum FlushICache {
FLUSH,
DONT_FLUSH
};
CodePatcher(Isolate* isolate, byte* address, int instructions,
FlushICache flush_cache = FLUSH);
~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
// Emit an instruction directly.
void Emit(Instr instr);
// Emit an address directly.
void Emit(Address addr);
// Emit the condition part of an instruction leaving the rest of the current
// instruction unchanged.
void EmitCondition(Condition cond);
private:
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
FlushICache flush_cache_; // Whether to flush the I cache after patching.
};
// -----------------------------------------------------------------------------
// Static helper functions.

View File

@ -14,6 +14,7 @@
#include "src/base/bits.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/runtime/runtime-utils.h"
@ -92,7 +93,7 @@ double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
return sim_->get_double_from_d_register(regnum);
return sim_->get_double_from_d_register(regnum).get_scalar();
}
@ -116,7 +117,7 @@ bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && !is_double) {
*value = sim_->get_float_from_s_register(regnum);
*value = sim_->get_float_from_s_register(regnum).get_scalar();
return true;
}
return false;
@ -127,7 +128,7 @@ bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && is_double) {
*value = sim_->get_double_from_d_register(regnum);
*value = sim_->get_double_from_d_register(regnum).get_scalar();
return true;
}
return false;
@ -136,7 +137,7 @@ bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
if (sim_->break_pc_ != nullptr) {
return false;
}
@ -150,25 +151,25 @@ bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != NULL) {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
sim_->break_pc_ = NULL;
sim_->break_pc_ = nullptr;
sim_->break_instr_ = 0;
return true;
}
void ArmDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != NULL) {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
void ArmDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != NULL) {
if (sim_->break_pc_ != nullptr) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
@ -210,11 +211,11 @@ void ArmDebugger::Debug() {
last_pc = sim_->get_pc();
}
char* line = ReadLine("sim> ");
if (line == NULL) {
if (line == nullptr) {
break;
} else {
char* last_input = sim_->last_debugger_input();
if (strcmp(line, "\n") == 0 && last_input != NULL) {
if (strcmp(line, "\n") == 0 && last_input != nullptr) {
line = last_input;
} else {
// Ownership is transferred to sim_;
@ -305,8 +306,8 @@ void ArmDebugger::Debug() {
PrintF("printobject <value>\n");
}
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
int32_t* cur = NULL;
int32_t* end = NULL;
int32_t* cur = nullptr;
int32_t* end = nullptr;
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
@ -356,9 +357,9 @@ void ArmDebugger::Debug() {
// use a reasonably large buffer
v8::internal::EmbeddedVector<char, 256> buffer;
byte* prev = NULL;
byte* cur = NULL;
byte* end = NULL;
byte* prev = nullptr;
byte* cur = nullptr;
byte* end = nullptr;
if (argc == 1) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
@ -415,7 +416,7 @@ void ArmDebugger::Debug() {
PrintF("break <address>\n");
}
} else if (strcmp(cmd, "del") == 0) {
if (!DeleteBreakpoint(NULL)) {
if (!DeleteBreakpoint(nullptr)) {
PrintF("deleting breakpoint failed\n");
}
} else if (strcmp(cmd, "flags") == 0) {
@ -552,8 +553,8 @@ void ArmDebugger::Debug() {
static bool ICacheMatch(void* one, void* two) {
DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
return one == two;
}
@ -598,7 +599,7 @@ void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
void* page) {
base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
if (entry->value == NULL) {
if (entry->value == nullptr) {
CachePage* new_page = new CachePage();
entry->value = new_page;
}
@ -609,10 +610,10 @@ CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
intptr_t start, int size) {
DCHECK(size <= CachePage::kPageSize);
DCHECK_LE(size, CachePage::kPageSize);
DCHECK(AllOnOnePage(start, size - 1));
DCHECK((start & CachePage::kLineMask) == 0);
DCHECK((size & CachePage::kLineMask) == 0);
DCHECK_EQ(start & CachePage::kLineMask, 0);
DCHECK_EQ(size & CachePage::kLineMask, 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(i_cache, page);
@ -653,7 +654,7 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
if (i_cache_ == nullptr) {
i_cache_ = new base::CustomMatcherHashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
@ -664,7 +665,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
stack_ = reinterpret_cast<char*>(malloc(stack_size));
pc_modified_ = false;
icount_ = 0;
break_pc_ = NULL;
break_pc_ = nullptr;
break_instr_ = 0;
// Set up architecture state.
@ -706,7 +707,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
registers_[pc] = bad_lr;
registers_[lr] = bad_lr;
last_debugger_input_ = NULL;
last_debugger_input_ = nullptr;
}
Simulator::~Simulator() {
@ -728,7 +729,7 @@ class Redirection {
: external_function_(external_function),
swi_instruction_(al | (0xf * B24) | kCallRtRedirected),
type_(type),
next_(NULL) {
next_(nullptr) {
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
@ -747,9 +748,9 @@ class Redirection {
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
DCHECK_EQ(current->type(), type);
for (; current != nullptr; current = current->next_) {
if (current->external_function_ == external_function &&
current->type_ == type) {
return current;
}
}
@ -813,10 +814,10 @@ void* Simulator::RedirectExternalReference(Isolate* isolate,
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
DCHECK(isolate_data != NULL);
DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
if (sim == nullptr) {
// TODO(146): delete the simulator object when a thread/isolate goes away.
sim = new Simulator(isolate);
isolate_data->set_simulator(sim);
@ -946,28 +947,26 @@ unsigned int Simulator::get_s_register(int sreg) const {
template<class InputType, int register_size>
void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
DCHECK(reg_index >= 0);
unsigned bytes = register_size * sizeof(vfp_registers_[0]);
DCHECK_EQ(sizeof(InputType), bytes);
DCHECK_GE(reg_index, 0);
if (register_size == 1) DCHECK(reg_index < num_s_registers);
if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());
char buffer[register_size * sizeof(vfp_registers_[0])];
memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
memcpy(&vfp_registers_[reg_index * register_size], buffer,
register_size * sizeof(vfp_registers_[0]));
memcpy(&vfp_registers_[reg_index * register_size], &value, bytes);
}
template<class ReturnType, int register_size>
ReturnType Simulator::GetFromVFPRegister(int reg_index) {
DCHECK(reg_index >= 0);
unsigned bytes = register_size * sizeof(vfp_registers_[0]);
DCHECK_EQ(sizeof(ReturnType), bytes);
DCHECK_GE(reg_index, 0);
if (register_size == 1) DCHECK(reg_index < num_s_registers);
if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());
ReturnType value = 0;
char buffer[register_size * sizeof(vfp_registers_[0])];
memcpy(buffer, &vfp_registers_[register_size * reg_index],
register_size * sizeof(vfp_registers_[0]));
memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
ReturnType value;
memcpy(&value, &vfp_registers_[register_size * reg_index], bytes);
return value;
}
@ -1004,8 +1003,8 @@ uint32_t Simulator::GetFromSpecialRegister(SRegister reg) {
// All are constructed here from r0-r3 or d0, d1 and r0.
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
*x = get_double_from_d_register(0);
*y = get_double_from_d_register(1);
*x = get_double_from_d_register(0).get_scalar();
*y = get_double_from_d_register(1).get_scalar();
*z = get_register(0);
} else {
// Registers 0 and 1 -> x.
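A minimal standalone sketch of the soft-float path mentioned above ("Registers 0 and 1 -> x"), assuming the usual little-endian AAPCS layout where r0 carries the low word of the double and r1 the high word; plain C++, not simulator code:
#include <cstdint>
#include <cstring>
// Reassemble a double from the two 32-bit core registers that carry it in the
// soft-float calling convention (assumption: r0 = low word, r1 = high word).
static double DoubleFromRegisterPair(uint32_t r0, uint32_t r1) {
  uint64_t bits = (static_cast<uint64_t>(r1) << 32) | r0;
  double x;
  std::memcpy(&x, &bits, sizeof(x));
  return x;
}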
@ -1479,7 +1478,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
*carry_out = (result & 1) == 1;
result >>= 1;
} else {
DCHECK(shift_amount >= 32);
DCHECK_GE(shift_amount, 32);
if (result < 0) {
*carry_out = true;
result = 0xffffffff;
@ -1502,7 +1501,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
*carry_out = (result & 1) == 1;
result = 0;
} else {
DCHECK(shift_amount > 32);
DCHECK_GT(shift_amount, 32);
*carry_out = false;
result = 0;
}
@ -1966,24 +1965,37 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
float Simulator::canonicalizeNaN(float value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
const uint32_t kDefaultNaN = 0x7FC00000u;
constexpr uint32_t kDefaultNaN = 0x7FC00000u;
if (FPSCR_default_NaN_mode_ && std::isnan(value)) {
value = bit_cast<float>(kDefaultNaN);
}
return value;
}
Float32 Simulator::canonicalizeNaN(Float32 value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
constexpr Float32 kDefaultNaN = Float32::FromBits(0x7FC00000u);
return FPSCR_default_NaN_mode_ && value.is_nan() ? kDefaultNaN : value;
}
double Simulator::canonicalizeNaN(double value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
const uint64_t kDefaultNaN = V8_UINT64_C(0x7FF8000000000000);
constexpr uint64_t kDefaultNaN = V8_UINT64_C(0x7FF8000000000000);
if (FPSCR_default_NaN_mode_ && std::isnan(value)) {
value = bit_cast<double>(kDefaultNaN);
}
return value;
}
Float64 Simulator::canonicalizeNaN(Float64 value) {
// Default NaN value, see "NaN handling" in "IEEE 754 standard implementation
// choices" of the ARM Reference Manual.
constexpr Float64 kDefaultNaN =
Float64::FromBits(V8_UINT64_C(0x7FF8000000000000));
return FPSCR_default_NaN_mode_ && value.is_nan() ? kDefaultNaN : value;
}
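A minimal standalone sketch of the default-NaN canonicalization performed above, using the same ARM default-NaN bit pattern; the boolean flag stands in for FPSCR_default_NaN_mode_ and nothing here is V8 API:
#include <cmath>
#include <cstdint>
#include <cstring>
// Replace any NaN with the ARM default NaN when default-NaN mode is active.
static double CanonicalizeNaN(double value, bool default_nan_mode) {
  constexpr uint64_t kDefaultNaN64 = 0x7FF8000000000000ull;
  if (default_nan_mode && std::isnan(value)) {
    std::memcpy(&value, &kDefaultNaN64, sizeof(value));  // bit_cast
  }
  return value;
}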
// Stop helper functions.
bool Simulator::isStopInstruction(Instruction* instr) {
@ -1992,13 +2004,13 @@ bool Simulator::isStopInstruction(Instruction* instr) {
bool Simulator::isWatchedStop(uint32_t code) {
DCHECK(code <= kMaxStopCode);
DCHECK_LE(code, kMaxStopCode);
return code < kNumOfWatchedStops;
}
bool Simulator::isEnabledStop(uint32_t code) {
DCHECK(code <= kMaxStopCode);
DCHECK_LE(code, kMaxStopCode);
// Unwatched stops are always enabled.
return !isWatchedStop(code) ||
!(watched_stops_[code].count & kStopDisabledBit);
@ -2022,7 +2034,7 @@ void Simulator::DisableStop(uint32_t code) {
void Simulator::IncreaseStopCounter(uint32_t code) {
DCHECK(code <= kMaxStopCode);
DCHECK_LE(code, kMaxStopCode);
DCHECK(isWatchedStop(code));
if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
PrintF("Stop counter for code %i has overflowed.\n"
@ -2037,7 +2049,7 @@ void Simulator::IncreaseStopCounter(uint32_t code) {
// Print a stop status.
void Simulator::PrintStopInfo(uint32_t code) {
DCHECK(code <= kMaxStopCode);
DCHECK_LE(code, kMaxStopCode);
if (!isWatchedStop(code)) {
PrintF("Stop not watched.");
} else {
@ -2305,7 +2317,7 @@ void Simulator::DecodeType01(Instruction* instr) {
}
}
if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
DCHECK((rd % 2) == 0);
DCHECK_EQ(rd % 2, 0);
if (instr->HasH()) {
// The strd instruction.
int32_t value1 = get_register(rd);
@ -2416,7 +2428,7 @@ void Simulator::DecodeType01(Instruction* instr) {
if (type == 0) {
shifter_operand = GetShiftRm(instr, &shifter_carry_out);
} else {
DCHECK(instr->TypeValue() == 1);
DCHECK_EQ(instr->TypeValue(), 1);
shifter_operand = GetImm(instr, &shifter_carry_out);
}
int32_t alu_out;
@ -3119,7 +3131,7 @@ void Simulator::DecodeType3(Instruction* instr) {
void Simulator::DecodeType4(Instruction* instr) {
DCHECK(instr->Bit(22) == 0); // only allowed to be set in privileged mode
DCHECK_EQ(instr->Bit(22), 0); // only allowed to be set in privileged mode
if (instr->HasL()) {
// Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
HandleRList(instr, true);
@ -3193,7 +3205,7 @@ void Simulator::DecodeType7(Instruction* instr) {
// vdup.size Qd, Rt.
void Simulator::DecodeTypeVFP(Instruction* instr) {
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
DCHECK(instr->Bits(11, 9) == 0x5);
DCHECK_EQ(instr->Bits(11, 9), 0x5);
// Obtain single precision register codes.
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
@ -3218,28 +3230,32 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
double dd_value = std::fabs(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
Float64 dm = get_double_from_d_register(vm);
constexpr uint64_t kSignBit64 = uint64_t{1} << 63;
Float64 dd = Float64::FromBits(dm.get_bits() & ~kSignBit64);
dd = canonicalizeNaN(dd);
set_d_register_from_double(vd, dd);
} else {
float sm_value = get_float_from_s_register(m);
float sd_value = std::fabs(sm_value);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
Float32 sm = get_float_from_s_register(m);
constexpr uint32_t kSignBit32 = uint32_t{1} << 31;
Float32 sd = Float32::FromBits(sm.get_bits() & ~kSignBit32);
sd = canonicalizeNaN(sd);
set_s_register_from_float(d, sd);
}
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
double dd_value = -dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
Float64 dm = get_double_from_d_register(vm);
constexpr uint64_t kSignBit64 = uint64_t{1} << 63;
Float64 dd = Float64::FromBits(dm.get_bits() ^ kSignBit64);
dd = canonicalizeNaN(dd);
set_d_register_from_double(vd, dd);
} else {
float sm_value = get_float_from_s_register(m);
float sd_value = -sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
Float32 sm = get_float_from_s_register(m);
constexpr uint32_t kSignBit32 = uint32_t{1} << 31;
Float32 sd = Float32::FromBits(sm.get_bits() ^ kSignBit32);
sd = canonicalizeNaN(sd);
set_s_register_from_float(d, sd);
}
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
@ -3262,12 +3278,12 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// vsqrt
lazily_initialize_fast_sqrt(isolate_);
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = fast_sqrt(dm_value, isolate_);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m);
float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = fast_sqrt(sm_value, isolate_);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@ -3277,17 +3293,19 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if (instr->SzValue() == 0x1) {
set_d_register_from_double(vd, instr->DoubleImmedVmov());
} else {
set_s_register_from_float(d, instr->DoubleImmedVmov());
// Cast double to float.
float value = instr->DoubleImmedVmov().get_scalar();
set_s_register_from_float(d, value);
}
} else if (((instr->Opc2Value() == 0x6)) && (instr->Opc3Value() == 0x3)) {
// vrintz - truncate
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm);
double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = trunc(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m);
float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = truncf(sm_value);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@ -3299,14 +3317,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
if (instr->Opc3Value() & 0x1) {
// vsub
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dn_value = get_double_from_d_register(vn).get_scalar();
double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = dn_value - dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n);
float sm_value = get_float_from_s_register(m);
float sn_value = get_float_from_s_register(n).get_scalar();
float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = sn_value - sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@ -3314,14 +3332,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else {
// vadd
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dn_value = get_double_from_d_register(vn).get_scalar();
double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = dn_value + dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n);
float sm_value = get_float_from_s_register(m);
float sn_value = get_float_from_s_register(n).get_scalar();
float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = sn_value + sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@ -3330,14 +3348,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
// vmul
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dn_value = get_double_from_d_register(vn).get_scalar();
double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = dn_value * dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n);
float sm_value = get_float_from_s_register(m);
float sn_value = get_float_from_s_register(n).get_scalar();
float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = sn_value * sm_value;
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@ -3346,48 +3364,46 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// vmla, vmls
const bool is_vmls = (instr->Opc3Value() & 0x1);
if (instr->SzValue() == 0x1) {
const double dd_val = get_double_from_d_register(vd);
const double dn_val = get_double_from_d_register(vn);
const double dm_val = get_double_from_d_register(vm);
const double dd_val = get_double_from_d_register(vd).get_scalar();
const double dn_val = get_double_from_d_register(vn).get_scalar();
const double dm_val = get_double_from_d_register(vm).get_scalar();
// Note: we do the mul and add/sub in separate steps to avoid getting a
// result with too high precision.
set_d_register_from_double(vd, dn_val * dm_val);
const double res = dn_val * dm_val;
set_d_register_from_double(vd, res);
if (is_vmls) {
set_d_register_from_double(
vd, canonicalizeNaN(dd_val - get_double_from_d_register(vd)));
set_d_register_from_double(vd, canonicalizeNaN(dd_val - res));
} else {
set_d_register_from_double(
vd, canonicalizeNaN(dd_val + get_double_from_d_register(vd)));
set_d_register_from_double(vd, canonicalizeNaN(dd_val + res));
}
} else {
const float sd_val = get_float_from_s_register(d);
const float sn_val = get_float_from_s_register(n);
const float sm_val = get_float_from_s_register(m);
const float sd_val = get_float_from_s_register(d).get_scalar();
const float sn_val = get_float_from_s_register(n).get_scalar();
const float sm_val = get_float_from_s_register(m).get_scalar();
// Note: we do the mul and add/sub in separate steps to avoid getting a
// result with too high precision.
set_s_register_from_float(d, sn_val * sm_val);
const float res = sn_val * sm_val;
set_s_register_from_float(d, res);
if (is_vmls) {
set_s_register_from_float(
d, canonicalizeNaN(sd_val - get_float_from_s_register(d)));
set_s_register_from_float(d, canonicalizeNaN(sd_val - res));
} else {
set_s_register_from_float(
d, canonicalizeNaN(sd_val + get_float_from_s_register(d)));
set_s_register_from_float(d, canonicalizeNaN(sd_val + res));
}
}
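The "separate steps" note above matters because a fused multiply-add rounds only once; a standalone sketch (standard C++, not simulator code) where the two orderings give different results:
#include <cmath>
#include <cstdio>
int main() {
  double eps = std::ldexp(1.0, -30);
  double a = 1.0 + eps;
  double b = 1.0 - eps;
  double c = -1.0;
  double two_step = (a * b) + c;     // product rounded first, then the sum: 0.0
  double fused = std::fma(a, b, c);  // single rounding: -2^-60
  std::printf("two-step=%a fused=%a\n", two_step, fused);
  return 0;
}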
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
if (instr->SzValue() == 0x1) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dn_value = get_double_from_d_register(vn).get_scalar();
double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = dn_value / dm_value;
div_zero_vfp_flag_ = (dm_value == 0);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sn_value = get_float_from_s_register(n);
float sm_value = get_float_from_s_register(m);
float sn_value = get_float_from_s_register(n).get_scalar();
float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = sn_value / sm_value;
div_zero_vfp_flag_ = (sm_value == 0);
sd_value = canonicalizeNaN(sd_value);
@ -3565,7 +3581,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
void Simulator::DecodeTypeCP15(Instruction* instr) {
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0));
DCHECK(instr->CoprocessorValue() == 15);
DCHECK_EQ(instr->CoprocessorValue(), 15);
if (instr->Bit(4) == 1) {
// mcr
@ -3626,10 +3642,10 @@ void Simulator::DecodeVCMP(Instruction* instr) {
}
if (precision == kDoublePrecision) {
double dd_value = get_double_from_d_register(d);
double dd_value = get_double_from_d_register(d).get_scalar();
double dm_value = 0.0;
if (instr->Opc2Value() == 0x4) {
dm_value = get_double_from_d_register(m);
dm_value = get_double_from_d_register(m).get_scalar();
}
// Raise exceptions for quiet NaNs if necessary.
@ -3641,10 +3657,10 @@ void Simulator::DecodeVCMP(Instruction* instr) {
Compute_FPSCR_Flags(dd_value, dm_value);
} else {
float sd_value = get_float_from_s_register(d);
float sd_value = get_float_from_s_register(d).get_scalar();
float sm_value = 0.0;
if (instr->Opc2Value() == 0x4) {
sm_value = get_float_from_s_register(m);
sm_value = get_float_from_s_register(m).get_scalar();
}
// Raise exceptions for quiet NaNs if necessary.
@ -3674,10 +3690,10 @@ void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
int src = instr->VFPMRegValue(src_precision);
if (dst_precision == kSinglePrecision) {
double val = get_double_from_d_register(src);
double val = get_double_from_d_register(src).get_scalar();
set_s_register_from_float(dst, static_cast<float>(val));
} else {
float val = get_float_from_s_register(src);
float val = get_float_from_s_register(src).get_scalar();
set_d_register_from_double(dst, static_cast<double>(val));
}
}
@ -3810,8 +3826,8 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
bool unsigned_integer = (instr->Bit(16) == 0);
bool double_precision = (src_precision == kDoublePrecision);
double val = double_precision ? get_double_from_d_register(src)
: get_float_from_s_register(src);
double val = double_precision ? get_double_from_d_register(src).get_scalar()
: get_float_from_s_register(src).get_scalar();
int32_t temp = ConvertDoubleToInt(val, unsigned_integer, mode);
@ -3852,7 +3868,7 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
// Ddst = MEM(Rbase + 4*offset).
// MEM(Rbase + 4*offset) = Dsrc.
void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
DCHECK((instr->TypeValue() == 6));
DCHECK_EQ(instr->TypeValue(), 6);
if (instr->CoprocessorValue() == 0xA) {
switch (instr->OpcodeValue()) {
@ -3870,7 +3886,7 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
int32_t address = get_register(rn) + 4 * offset;
// Load and store address for singles must be at least four-byte
// aligned.
DCHECK((address % 4) == 0);
DCHECK_EQ(address % 4, 0);
if (instr->HasL()) {
// Load single from memory: vldr.
set_s_register_from_sinteger(vd, ReadW(address, instr));
@ -3926,7 +3942,7 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
int32_t address = get_register(rn) + 4 * offset;
// Load and store address for doubles must be at least four-byte
// aligned.
DCHECK((address % 4) == 0);
DCHECK_EQ(address % 4, 0);
if (instr->HasL()) {
// Load double from memory: vldr.
int32_t data[] = {
@ -5561,7 +5577,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
if (instr->SzValue() == 0x1) {
int vm = instr->VFPMRegValue(kDoublePrecision);
int vd = instr->VFPDRegValue(kDoublePrecision);
double dm_value = get_double_from_d_register(vm);
double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = 0.0;
int rounding_mode = instr->Bits(17, 16);
switch (rounding_mode) {
@ -5587,7 +5603,7 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
} else {
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
float sm_value = get_float_from_s_register(m);
float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = 0.0;
int rounding_mode = instr->Bits(17, 16);
switch (rounding_mode) {
@ -5617,8 +5633,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int m = instr->VFPMRegValue(kDoublePrecision);
int n = instr->VFPNRegValue(kDoublePrecision);
int d = instr->VFPDRegValue(kDoublePrecision);
double dn_value = get_double_from_d_register(n);
double dm_value = get_double_from_d_register(m);
double dn_value = get_double_from_d_register(n).get_scalar();
double dm_value = get_double_from_d_register(m).get_scalar();
double dd_value;
if (instr->Bit(6) == 0x1) { // vminnm
if ((dn_value < dm_value) || std::isnan(dm_value)) {
@ -5647,8 +5663,8 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int m = instr->VFPMRegValue(kSinglePrecision);
int n = instr->VFPNRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
float sn_value = get_float_from_s_register(n);
float sm_value = get_float_from_s_register(m);
float sn_value = get_float_from_s_register(n).get_scalar();
float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value;
if (instr->Bit(6) == 0x1) { // vminnm
if ((sn_value < sm_value) || std::isnan(sm_value)) {
@ -5704,13 +5720,13 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
int n = instr->VFPNRegValue(kDoublePrecision);
int m = instr->VFPMRegValue(kDoublePrecision);
int d = instr->VFPDRegValue(kDoublePrecision);
double result = get_double_from_d_register(condition_holds ? n : m);
Float64 result = get_double_from_d_register(condition_holds ? n : m);
set_d_register_from_double(d, result);
} else {
int n = instr->VFPNRegValue(kSinglePrecision);
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
float result = get_float_from_s_register(condition_holds ? n : m);
Float32 result = get_float_from_s_register(condition_holds ? n : m);
set_s_register_from_float(d, result);
}
} else {
@ -5884,7 +5900,7 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
// Set up arguments
// First four arguments passed in registers.
DCHECK(argument_count >= 4);
DCHECK_GE(argument_count, 4);
set_register(r0, va_arg(parameters, int32_t));
set_register(r1, va_arg(parameters, int32_t));
set_register(r2, va_arg(parameters, int32_t));
@ -5935,16 +5951,6 @@ int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
}
double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
CallFP(entry, d0, d1);
if (use_eabi_hardfloat()) {
return get_double_from_d_register(0);
} else {
return get_double_from_register_pair(0);
}
}
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);

View File

@ -16,6 +16,7 @@
#include "src/allocation.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/mutex.h"
#include "src/boxed-float.h"
#if !defined(USE_SIMULATOR)
// Running without a simulator on a native arm platform.
@ -158,20 +159,26 @@ class Simulator {
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
void set_d_register_from_double(int dreg, const double& dbl) {
void set_d_register_from_double(int dreg, const Float64 dbl) {
SetVFPRegister<Float64, 2>(dreg, dbl);
}
void set_d_register_from_double(int dreg, const double dbl) {
SetVFPRegister<double, 2>(dreg, dbl);
}
double get_double_from_d_register(int dreg) {
return GetFromVFPRegister<double, 2>(dreg);
Float64 get_double_from_d_register(int dreg) {
return GetFromVFPRegister<Float64, 2>(dreg);
}
void set_s_register_from_float(int sreg, const Float32 flt) {
SetVFPRegister<Float32, 1>(sreg, flt);
}
void set_s_register_from_float(int sreg, const float flt) {
SetVFPRegister<float, 1>(sreg, flt);
}
float get_float_from_s_register(int sreg) {
return GetFromVFPRegister<float, 1>(sreg);
Float32 get_float_from_s_register(int sreg) {
return GetFromVFPRegister<Float32, 1>(sreg);
}
void set_s_register_from_sinteger(int sreg, const int sint) {
@ -208,7 +215,6 @@ class Simulator {
// Alternative: call a 2-argument double function.
void CallFP(byte* entry, double d0, double d1);
int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
double CallFPReturnsDouble(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@ -277,6 +283,8 @@ class Simulator {
void Copy_FPSCR_to_APSR();
inline float canonicalizeNaN(float value);
inline double canonicalizeNaN(double value);
inline Float32 canonicalizeNaN(Float32 value);
inline Float64 canonicalizeNaN(Float64 value);
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);

View File

@ -284,7 +284,7 @@ Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
extend_(extend),
shift_amount_(shift_amount) {
DCHECK(reg.IsValid());
DCHECK(shift_amount <= 4);
DCHECK_LE(shift_amount, 4);
DCHECK(!reg.IsSP());
// Extend modes SXTX and UXTX require a 64-bit register.
@ -533,7 +533,7 @@ Address Assembler::target_address_at(Address pc, Address constant_pool) {
Address Assembler::target_address_at(Address pc, Code* code) {
Address constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : nullptr;
return target_address_at(pc, constant_pool);
}
@ -618,7 +618,7 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
void Assembler::set_target_address_at(Isolate* isolate, Address pc, Code* code,
Address target,
ICacheFlushMode icache_flush_mode) {
Address constant_pool = code ? code->constant_pool() : NULL;
Address constant_pool = code ? code->constant_pool() : nullptr;
set_target_address_at(isolate, pc, constant_pool, target, icache_flush_mode);
}
@ -629,14 +629,13 @@ int RelocInfo::target_address_size() {
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, host_);
}
Address RelocInfo::target_address_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_) ||
rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_pointer_address_at(pc_);
}
@ -665,7 +664,7 @@ void RelocInfo::set_target_object(HeapObject* target,
Assembler::set_target_address_at(target->GetIsolate(), pc_, host_,
reinterpret_cast<Address>(target),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr) {
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
target);
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
@ -710,9 +709,9 @@ void RelocInfo::WipeOut(Isolate* isolate) {
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_));
if (IsInternalReference(rmode_)) {
Memory::Address_at(pc_) = NULL;
Memory::Address_at(pc_) = nullptr;
} else {
Assembler::set_target_address_at(isolate, pc_, host_, NULL);
Assembler::set_target_address_at(isolate, pc_, host_, nullptr);
}
}
@ -816,7 +815,7 @@ LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
DCHECK(kStartOfLabelLinkChain == 0);
DCHECK_EQ(kStartOfLabelLinkChain, 0);
int offset = LinkAndGetByteOffsetTo(label);
DCHECK(IsAligned(offset, kInstructionSize));
return offset >> kInstructionSizeLog2;
@ -965,7 +964,7 @@ Instr Assembler::ExtendMode(Extend extend) {
Instr Assembler::ImmExtendShift(unsigned left_shift) {
DCHECK(left_shift <= 4);
DCHECK_LE(left_shift, 4);
return left_shift << ImmExtendShift_offset;
}

View File

@ -92,7 +92,7 @@ void CPURegList::RemoveCalleeSaved() {
} else if (type() == CPURegister::kVRegister) {
Remove(GetCalleeSavedV(RegisterSizeInBits()));
} else {
DCHECK(type() == CPURegister::kNoRegister);
DCHECK_EQ(type(), CPURegister::kNoRegister);
DCHECK(IsEmpty());
// The list must already be empty, so do nothing.
}
@ -195,19 +195,16 @@ void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size,
// No icache flushing needed, see comment in set_target_address_at.
}
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
Register reg3, Register reg4) {
CPURegList regs(reg1, reg2, reg3, reg4);
const RegisterConfiguration* config = RegisterConfiguration::Default();
for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
int code = config->GetAllocatableDoubleCode(i);
Register candidate = Register::from_code(code);
if (regs.IncludesAliasOf(candidate)) continue;
return candidate;
}
UNREACHABLE();
void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
set_embedded_address(isolate, address, icache_flush_mode);
}
Address RelocInfo::js_to_wasm_address() const {
DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
return embedded_address();
}
bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
const CPURegister& reg3, const CPURegister& reg4,
@ -361,7 +358,7 @@ bool ConstPool::RecordEntry(intptr_t data, RelocInfo::Mode mode) {
int ConstPool::DistanceToFirstUse() {
DCHECK(first_use_ >= 0);
DCHECK_GE(first_use_, 0);
return assm_->pc_offset() - first_use_;
}
@ -497,8 +494,8 @@ MemOperand::PairResult MemOperand::AreConsistentForPair(
const MemOperand& operandA,
const MemOperand& operandB,
int access_size_log2) {
DCHECK(access_size_log2 >= 0);
DCHECK(access_size_log2 <= 3);
DCHECK_GE(access_size_log2, 0);
DCHECK_LE(access_size_log2, 3);
// Step one: check that they share the same base, that the mode is Offset
// and that the offset is a multiple of access size.
if (!operandA.base().Is(operandB.base()) ||
@ -699,7 +696,7 @@ void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
// The branch is in the middle of the chain.
if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
prev_link->SetImmPCOffsetTarget(isolate_data(), next_link);
} else if (label_veneer != NULL) {
} else if (label_veneer != nullptr) {
// Use the veneer for all previous links in the chain.
prev_link->SetImmPCOffsetTarget(isolate_data(), prev_link);
@ -768,11 +765,11 @@ void Assembler::bind(Label* label) {
CheckLabelLinkChain(label);
DCHECK(linkoffset >= 0);
DCHECK_GE(linkoffset, 0);
DCHECK(linkoffset < pc_offset());
DCHECK((linkoffset > prevlinkoffset) ||
(linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
DCHECK(prevlinkoffset >= 0);
DCHECK_GE(prevlinkoffset, 0);
// Update the link to point to the label.
if (link->IsUnresolvedInternalReference()) {
@ -804,7 +801,7 @@ void Assembler::bind(Label* label) {
int Assembler::LinkAndGetByteOffsetTo(Label* label) {
DCHECK(sizeof(*pc_) == 1);
DCHECK_EQ(sizeof(*pc_), 1);
CheckLabelLinkChain(label);
int offset;
@ -819,7 +816,7 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
// Note that offset can be zero for self-referential instructions. (This
// could be useful for ADR, for example.)
offset = label->pos() - pc_offset();
DCHECK(offset <= 0);
DCHECK_LE(offset, 0);
} else {
if (label->is_linked()) {
// The label is linked, so the referring instruction should be added onto
@ -828,7 +825,7 @@ int Assembler::LinkAndGetByteOffsetTo(Label* label) {
// In this case, label->pos() returns the offset of the last linked
// instruction from the start of the buffer.
offset = label->pos() - pc_offset();
DCHECK(offset != kStartOfLabelLinkChain);
DCHECK_NE(offset, kStartOfLabelLinkChain);
// Note that the offset here needs to be PC-relative only so that the
// first instruction in a buffer can link to an unbound label. Otherwise,
// the offset would be 0 for this case, and 0 is reserved for
@ -883,7 +880,7 @@ void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
if (unresolved_branches_.empty()) {
DCHECK(next_veneer_pool_check_ == kMaxInt);
DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
return;
}
@ -1635,7 +1632,7 @@ void Assembler::LoadStorePair(const CPURegister& rt,
// Pre-index and post-index modes.
DCHECK(!rt.Is(addr.base()));
DCHECK(!rt2.Is(addr.base()));
DCHECK(addr.offset() != 0);
DCHECK_NE(addr.offset(), 0);
if (addr.IsPreIndex()) {
addrmodeop = LoadStorePairPreIndexFixed;
} else {
@ -1761,6 +1758,7 @@ void Assembler::stlxr(const Register& rs, const Register& rt,
const Register& rn) {
DCHECK(rs.Is32Bits());
DCHECK(rn.Is64Bits());
DCHECK(!rs.Is(rt) && !rs.Is(rn));
LoadStoreAcquireReleaseOp op = rt.Is32Bits() ? STLXR_w : STLXR_x;
Emit(op | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
@ -1788,6 +1786,7 @@ void Assembler::stlxrb(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
DCHECK(!rs.Is(rt) && !rs.Is(rn));
Emit(STLXR_b | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
@ -1814,6 +1813,7 @@ void Assembler::stlxrh(const Register& rs, const Register& rt,
DCHECK(rs.Is32Bits());
DCHECK(rt.Is32Bits());
DCHECK(rn.Is64Bits());
DCHECK(!rs.Is(rt) && !rs.Is(rn));
Emit(STLXR_h | Rs(rs) | Rt2(x31) | RnSP(rn) | Rt(rt));
}
@ -3917,7 +3917,7 @@ void Assembler::dcptr(Label* label) {
// In this case, label->pos() returns the offset of the last linked
// instruction from the start of the buffer.
offset = label->pos() - pc_offset();
DCHECK(offset != kStartOfLabelLinkChain);
DCHECK_NE(offset, kStartOfLabelLinkChain);
} else {
// The label is unused, so it now becomes linked and the internal
// reference is at the start of the new link chain.
@ -4064,7 +4064,7 @@ void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
DCHECK_LE(RoundUp(len, kInstructionSize), static_cast<size_t>(kGap));
EmitData(string, static_cast<int>(len));
// Pad with NULL characters until pc_ is aligned.
// Pad with nullptr characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
static_assert(sizeof(pad) == kInstructionSize,
"Size of padding must match instruction size.");
@ -4087,11 +4087,11 @@ void Assembler::debug(const char* message, uint32_t code, Instr params) {
// Refer to instructions-arm64.h for a description of the marker and its
// arguments.
hlt(kImmExceptionIsDebug);
DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugCodeOffset);
dc32(code);
DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugParamsOffset);
dc32(params);
DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
DCHECK_EQ(SizeOfCodeGeneratedSince(&start), kDebugMessageOffset);
EmitStringData(message);
hlt(kImmExceptionIsUnreachable);
@ -4116,8 +4116,8 @@ void Assembler::Logical(const Register& rd,
int64_t immediate = operand.ImmediateValue();
unsigned reg_size = rd.SizeInBits();
DCHECK(immediate != 0);
DCHECK(immediate != -1);
DCHECK_NE(immediate, 0);
DCHECK_NE(immediate, -1);
DCHECK(rd.Is64Bits() || is_uint32(immediate));
// If the operation is NOT, invert the operation and immediate.
@ -4300,7 +4300,7 @@ void Assembler::EmitExtendShift(const Register& rd,
case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
case UXTX:
case SXTX: {
DCHECK(rn.SizeInBits() == kXRegSizeInBits);
DCHECK_EQ(rn.SizeInBits(), kXRegSizeInBits);
// Nothing to extend. Just shift.
lsl(rd, rn_, left_shift);
break;
@ -4438,7 +4438,7 @@ bool Assembler::IsImmLogical(uint64_t value,
unsigned* n,
unsigned* imm_s,
unsigned* imm_r) {
DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
DCHECK((n != nullptr) && (imm_s != nullptr) && (imm_r != nullptr));
DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
bool negate = false;
@ -4748,7 +4748,7 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, nullptr);
bool write_reloc_info = true;
if ((rmode == RelocInfo::COMMENT) ||
@ -4776,7 +4776,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
!serializer_enabled() && !emit_debug_code()) {
return;
}
DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
}
}
@ -4862,7 +4862,7 @@ bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
void Assembler::RecordVeneerPool(int location_offset, int size) {
RelocInfo rinfo(buffer_ + location_offset, RelocInfo::VENEER_POOL,
static_cast<intptr_t>(size), NULL);
static_cast<intptr_t>(size), nullptr);
reloc_info_writer.Write(&rinfo);
}
@ -4940,7 +4940,7 @@ void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
int margin) {
// There is nothing to do if there are no pending veneer pool entries.
if (unresolved_branches_.empty()) {
DCHECK(next_veneer_pool_check_ == kMaxInt);
DCHECK_EQ(next_veneer_pool_check_, kMaxInt);
return;
}
@ -5008,7 +5008,7 @@ void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
adr(rd, target_offset & 0xFFFF);
movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
DCHECK((target_offset >> 48) == 0);
DCHECK_EQ(target_offset >> 48, 0);
add(rd, rd, scratch);
}

View File

@ -132,7 +132,7 @@ class CPURegister : public RegisterBase<CPURegister, kRegAfterLast> {
}
int SizeInBytes() const {
DCHECK(IsValid());
DCHECK(SizeInBits() % 8 == 0);
DCHECK_EQ(SizeInBits() % 8, 0);
return reg_size_ / 8;
}
bool Is8Bits() const {
@ -280,6 +280,12 @@ class Register : public CPURegister {
return Register::Create(code, kXRegSizeInBits);
}
template <int code>
static Register from_code() {
// Always return an X register.
return Register::Create<code, kXRegSizeInBits>();
}
// End of V8 compatibility section -----------------------
//
private:
@ -503,13 +509,6 @@ ALIAS_REGISTER(VRegister, fp_scratch2, d31);
#undef ALIAS_REGISTER
Register GetAllocatableRegisterThatIsNotOneOf(Register reg1,
Register reg2 = NoReg,
Register reg3 = NoReg,
Register reg4 = NoReg);
// AreAliased returns true if any of the named registers overlap. Arguments set
// to NoReg are ignored. The system stack pointer may be specified.
bool AreAliased(const CPURegister& reg1,
@ -671,7 +670,7 @@ class CPURegList {
int RegisterSizeInBytes() const {
int size_in_bits = RegisterSizeInBits();
DCHECK((size_in_bits % kBitsPerByte) == 0);
DCHECK_EQ(size_in_bits % kBitsPerByte, 0);
return size_in_bits / kBitsPerByte;
}
@ -935,14 +934,15 @@ class Assembler : public AssemblerBase {
// relocation information starting from the end of the buffer. See CodeDesc
// for a detailed comment on the layout (globals.h).
//
// If the provided buffer is NULL, the assembler allocates and grows its own
// buffer, and buffer_size determines the initial buffer size. The buffer is
// owned by the assembler and deallocated upon destruction of the assembler.
// If the provided buffer is nullptr, the assembler allocates and grows its
// own buffer, and buffer_size determines the initial buffer size. The buffer
// is owned by the assembler and deallocated upon destruction of the
// assembler.
//
// If the provided buffer is not NULL, the assembler uses the provided buffer
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
// If the provided buffer is not nullptr, the assembler uses the provided
// buffer for code generation and assumes its size to be buffer_size. If the
// buffer is too small, a fatal error occurs. No deallocation of the buffer is
// done upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size)
: Assembler(IsolateData(isolate), buffer, buffer_size) {}
Assembler(IsolateData isolate_data, void* buffer, int buffer_size);
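A minimal standalone analogue of the buffer-ownership policy described in the comment above (not the real Assembler): pass nullptr to get an owned heap buffer, or pass a caller-owned buffer of a fixed size:
#include <cstdlib>
class FixedOrOwnedBuffer {
 public:
  // buffer == nullptr: allocate and own a buffer of buffer_size bytes.
  // buffer != nullptr: borrow the caller's buffer; never free it.
  FixedOrOwnedBuffer(void* buffer, int buffer_size)
      : owns_buffer_(buffer == nullptr),
        buffer_(owns_buffer_ ? std::malloc(buffer_size) : buffer),
        size_(buffer_size) {}
  ~FixedOrOwnedBuffer() {
    if (owns_buffer_) std::free(buffer_);
  }
  void* data() const { return buffer_; }
  int size() const { return size_; }
 private:
  bool owns_buffer_;
  void* buffer_;
  int size_;
};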
@ -965,8 +965,8 @@ class Assembler : public AssemblerBase {
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
//
// The descriptor (desc) can be NULL. In that case, the code is finalized as
// usual, but the descriptor is not populated.
// The descriptor (desc) can be nullptr. In that case, the code is finalized
// as usual, but the descriptor is not populated.
void GetCode(Isolate* isolate, CodeDesc* desc);
// Insert the smallest number of nop instructions
@ -1064,7 +1064,7 @@ class Assembler : public AssemblerBase {
// TODO(jbramley): Work out what sign to use for these things and if possible,
// change things to be consistent.
void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
DCHECK(size >= 0);
DCHECK_GE(size, 0);
DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
}
@ -1408,14 +1408,14 @@ class Assembler : public AssemblerBase {
// Bfm aliases.
// Bitfield insert.
void bfi(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Bitfield extract and insert low.
void bfxil(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
bfm(rd, rn, lsb, lsb + width - 1);
}
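What the bfi alias above computes, as a plain C++ sketch (illustrative semantics only, not the V8 encoding helper): insert the low |width| bits of src into dst at bit position |lsb|, leaving every other bit of dst untouched:
#include <cstdint>
static uint64_t BitfieldInsert(uint64_t dst, uint64_t src, int lsb, int width) {
  uint64_t mask = (width == 64) ? ~uint64_t{0} : ((uint64_t{1} << width) - 1);
  return (dst & ~(mask << lsb)) | ((src & mask) << lsb);
}
// e.g. BitfieldInsert(0xFF, 0b101, 4, 3) == 0xDF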
@ -1429,14 +1429,14 @@ class Assembler : public AssemblerBase {
// Signed bitfield insert in zero.
void sbfiz(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Signed bitfield extract.
void sbfx(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
sbfm(rd, rn, lsb, lsb + width - 1);
}
@ -1472,14 +1472,14 @@ class Assembler : public AssemblerBase {
// Unsigned bitfield insert in zero.
void ubfiz(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
}
// Unsigned bitfield extract.
void ubfx(const Register& rd, const Register& rn, int lsb, int width) {
DCHECK(width >= 1);
DCHECK_GE(width, 1);
DCHECK(lsb + width <= rn.SizeInBits());
ubfm(rd, rn, lsb, lsb + width - 1);
}
@ -2872,9 +2872,9 @@ class Assembler : public AssemblerBase {
// Emit an address in the instruction stream.
void dcptr(Label* label);
// Copy a string into the instruction stream, including the terminating NULL
// character. The instruction pointer (pc_) is then aligned correctly for
// subsequent instructions.
// Copy a string into the instruction stream, including the terminating
// nullptr character. The instruction pointer (pc_) is then aligned correctly
// for subsequent instructions.
void EmitStringData(const char* string);
// Pseudo-instructions ------------------------------------------------------
@ -3353,9 +3353,8 @@ class Assembler : public AssemblerBase {
// Remove the specified branch from the unbound label link chain.
// If available, a veneer for this label can be used for other branches in the
// chain if the link chain cannot be fixed up without this branch.
void RemoveBranchFromLabelLinkChain(Instruction* branch,
Label* label,
Instruction* label_veneer = NULL);
void RemoveBranchFromLabelLinkChain(Instruction* branch, Label* label,
Instruction* label_veneer = nullptr);
// Prevent sharing of code target constant pool entries until
// EndBlockCodeTargetSharing is called. Calls to this function can be nested
@ -3497,7 +3496,7 @@ class Assembler : public AssemblerBase {
// Emit data inline in the instruction stream.
void EmitData(void const * data, unsigned size) {
DCHECK(sizeof(*pc_) == 1);
DCHECK_EQ(sizeof(*pc_), 1);
DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
// TODO(all): Somehow register we have some data here. Then we can

View File

@ -9,12 +9,10 @@
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/bootstrapper.h"
#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate.h"
@ -41,35 +39,21 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
void DoubleToIStub::Generate(MacroAssembler* masm) {
Label done;
Register input = source();
Register result = destination();
DCHECK(is_truncating());
DCHECK(result.Is64Bits());
DCHECK(jssp.Is(masm->StackPointer()));
int double_offset = offset();
UseScratchRegisterScope temps(masm);
Register scratch1 = temps.AcquireX();
Register scratch2 = temps.AcquireX();
DoubleRegister double_scratch = temps.AcquireD();
DoubleRegister double_scratch = d0; // only used if !skip_fastpath()
Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result);
Register scratch2 =
GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1);
__ Push(scratch1, scratch2);
// Account for saved regs if input is jssp.
if (input.is(jssp)) double_offset += 2 * kPointerSize;
if (!skip_fastpath()) {
__ Push(double_scratch);
if (input.is(jssp)) double_offset += 1 * kDoubleSize;
__ Ldr(double_scratch, MemOperand(input, double_offset));
// Try to convert with a FPU convert instruction. This handles all
// non-saturating cases.
__ TryConvertDoubleToInt64(result, double_scratch, &done);
__ Fmov(result, double_scratch);
} else {
__ Ldr(result, MemOperand(input, double_offset));
}
__ Peek(double_scratch, 0);
// Try to convert with a FPU convert instruction. This handles all
// non-saturating cases.
__ TryConvertDoubleToInt64(result, double_scratch, &done);
__ Fmov(result, double_scratch);
// If we reach here we need to manually convert the input to an int32.
@ -110,55 +94,10 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Lsl(result, mantissa, exponent);
__ Bind(&done);
if (!skip_fastpath()) {
__ Pop(double_scratch);
}
__ Pop(scratch2, scratch1);
__ Ret();
}
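A rough standalone sketch of the manual slow path mentioned above ("manually convert the input to an int32"): truncate a double to its low 32 bits, ECMAScript-style, straight from the IEEE-754 fields; the naming and structure are illustrative, not the stub's code:
#include <cstdint>
#include <cstring>
static int32_t TruncateToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  // Unbiased exponent, rescaled so the 52-bit mantissa is an integer.
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1075;
  uint64_t mantissa = (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
  uint64_t magnitude = 0;
  if (exponent >= 32) {
    // Every bit of interest is shifted past bit 31 (also covers NaN/Inf -> 0).
  } else if (exponent >= 0) {
    magnitude = mantissa << exponent;   // only the low 32 bits matter
  } else if (exponent > -53) {
    magnitude = mantissa >> -exponent;  // drop the fractional bits
  }                                     // else: |input| < 1, result is 0
  uint32_t low = static_cast<uint32_t>(magnitude);
  return static_cast<int32_t>((bits >> 63) ? 0u - low : low);
}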
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
CPURegList saved_regs = kCallerSaved;
CPURegList saved_fp_regs = kCallerSavedV;
// We don't allow a GC during a store buffer overflow so there is no need to
// store the registers in any particular way, but we do have to store and
// restore them.
// We don't care if MacroAssembler scratch registers are corrupted.
saved_regs.Remove(*(masm->TmpList()));
saved_fp_regs.Remove(*(masm->FPTmpList()));
DCHECK_EQ(saved_regs.Count() % 2, 0);
DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
__ PushCPURegList(saved_regs);
if (save_doubles()) {
__ PushCPURegList(saved_fp_regs);
}
AllowExternalCallThatCantCauseGC scope(masm);
__ Mov(x0, ExternalReference::isolate_address(isolate()));
__ CallCFunction(
ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
if (save_doubles()) {
__ PopCPURegList(saved_fp_regs);
}
__ PopCPURegList(saved_regs);
__ Ret();
}
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
stub1.GetCode();
StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
stub2.GetCode();
}
void MathPowStub::Generate(MacroAssembler* masm) {
// Stack on entry:
// jssp[0]: Exponent (as a tagged value).
@ -284,10 +223,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
// It is important that the following stubs are generated in this order
// because pregenerated stubs can only call other pregenerated stubs.
// RecordWriteStub uses StoreBufferOverflowStub, which in turn uses
// CEntryStub.
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
StoreFastElementStub::GenerateAheadOfTime(isolate);
}
@ -299,8 +235,7 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
USE(isolate);
}
bool CEntryStub::NeedsImmovableCode() {
Movability CEntryStub::NeedsImmovableCode() {
// CEntryStub stores the return address on the stack before calling into
// C++ code. In some cases, the VM accesses this address, but it is not used
// when the C++ code returns to the stub because LR holds the return address
@ -309,7 +244,7 @@ bool CEntryStub::NeedsImmovableCode() {
// TODO(jbramley): Whilst this is the only analysis that makes sense, I can't
// find any comment to confirm this, and I don't hit any crashes whatever
// this function returns. The analysis should be properly confirmed.
return true;
return kImmovable;
}
@ -369,7 +304,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// - Adjust for the arg[] array.
Register temp_argv = x11;
if (!argv_in_register()) {
__ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2));
__ SlotAddress(temp_argv, x0);
// - Adjust for the receiver.
__ Sub(temp_argv, temp_argv, 1 * kPointerSize);
}
@ -480,11 +415,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Peek(argc, 2 * kPointerSize);
__ Peek(target, 3 * kPointerSize);
__ LeaveExitFrame(save_doubles(), x10, true);
__ LeaveExitFrame(save_doubles(), x10);
DCHECK(jssp.Is(__ StackPointer()));
if (!argv_in_register()) {
// Drop the remaining stack slots and return from the stub.
__ Drop(x11);
__ DropArguments(x11);
}
__ AssertFPCRState();
__ Ret();
@ -498,10 +433,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_handler_context_address(
IsolateAddressId::kPendingHandlerContextAddress, isolate());
ExternalReference pending_handler_code_address(
IsolateAddressId::kPendingHandlerCodeAddress, isolate());
ExternalReference pending_handler_offset_address(
IsolateAddressId::kPendingHandlerOffsetAddress, isolate());
ExternalReference pending_handler_entrypoint_address(
IsolateAddressId::kPendingHandlerEntrypointAddress, isolate());
ExternalReference pending_handler_fp_address(
IsolateAddressId::kPendingHandlerFPAddress, isolate());
ExternalReference pending_handler_sp_address(
@ -543,12 +476,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ Bind(&not_js_frame);
// Compute the handler entry address and jump to it.
__ Mov(x10, Operand(pending_handler_code_address));
__ Mov(x10, Operand(pending_handler_entrypoint_address));
__ Ldr(x10, MemOperand(x10));
__ Mov(x11, Operand(pending_handler_offset_address));
__ Ldr(x11, MemOperand(x11));
__ Add(x10, x10, Code::kHeaderSize - kHeapObjectTag);
__ Add(x10, x10, x11);
__ Br(x10);
}
@ -610,7 +539,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Select between the inner and outermost frame marker, based on the JS entry
// sp. We assert that the inner marker is zero, so we can use xzr to save a
// move instruction.
DCHECK(StackFrame::INNER_JSENTRY_FRAME == 0);
DCHECK_EQ(StackFrame::INNER_JSENTRY_FRAME, 0);
__ Cmp(x11, 0); // If x11 is zero, this is the outermost frame.
__ Csel(x12, xzr, StackFrame::OUTERMOST_JSENTRY_FRAME, ne);
__ B(ne, &done);
@ -738,371 +667,6 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Ret();
}
void StringHelper::GenerateFlatOneByteStringEquals(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3) {
DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
Register result = x0;
Register left_length = scratch1;
Register right_length = scratch2;
// Compare lengths. If lengths differ, strings can't be equal. Lengths are
// smis, and don't need to be untagged.
Label strings_not_equal, check_zero_length;
__ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
__ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
__ Cmp(left_length, right_length);
__ B(eq, &check_zero_length);
__ Bind(&strings_not_equal);
__ Mov(result, Smi::FromInt(NOT_EQUAL));
__ Ret();
// Check if the length is zero. If so, the strings must be equal (and empty).
Label compare_chars;
__ Bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
__ Cbnz(left_length, &compare_chars);
__ Mov(result, Smi::FromInt(EQUAL));
__ Ret();
// Compare characters. Falls through if all characters are equal.
__ Bind(&compare_chars);
GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
scratch3, &strings_not_equal);
// Characters in strings are equal.
__ Mov(result, Smi::FromInt(EQUAL));
__ Ret();
}
void StringHelper::GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3, Register scratch4) {
DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
Register length_delta = scratch3;
__ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
__ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ Subs(length_delta, scratch1, scratch2);
Register min_length = scratch1;
__ Csel(min_length, scratch2, scratch1, gt);
__ Cbz(min_length, &compare_lengths);
// Compare loop.
GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
scratch4, &result_not_equal);
// Compare lengths - strings up to min-length are equal.
__ Bind(&compare_lengths);
DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
// Use length_delta as result if it's zero.
Register result = x0;
__ Subs(result, length_delta, 0);
__ Bind(&result_not_equal);
Register greater = x10;
Register less = x11;
__ Mov(greater, Smi::FromInt(GREATER));
__ Mov(less, Smi::FromInt(LESS));
__ CmovX(result, greater, gt);
__ CmovX(result, less, lt);
__ Ret();
}
void StringHelper::GenerateOneByteCharsCompareLoop(
MacroAssembler* masm, Register left, Register right, Register length,
Register scratch1, Register scratch2, Label* chars_not_equal) {
DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
// Change index to run from -length to -1 by adding length to string
// start. This means that the loop ends when index reaches zero, which
// doesn't need an additional compare.
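// Roughly, the loop below behaves like this C sketch (illustrative names,
// not part of the generated code):
//   const uint8_t* l = left_chars + length;   // left is biased below
//   const uint8_t* r = right_chars + length;  // right is biased below
//   for (intptr_t i = -length; i != 0; i++) {
//     if (l[i] != r[i]) goto chars_not_equal;
//   }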
__ SmiUntag(length);
__ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
__ Add(left, left, scratch1);
__ Add(right, right, scratch1);
Register index = length;
__ Neg(index, length); // index = -length;
// Compare loop
Label loop;
__ Bind(&loop);
__ Ldrb(scratch1, MemOperand(left, index));
__ Ldrb(scratch2, MemOperand(right, index));
__ Cmp(scratch1, scratch2);
__ B(ne, chars_not_equal);
__ Add(index, index, 1);
__ Cbnz(index, &loop);
}
RecordWriteStub::RegisterAllocation::RegisterAllocation(Register object,
Register address,
Register scratch)
: object_(object),
address_(address),
scratch0_(scratch),
saved_regs_(kCallerSaved),
saved_fp_regs_(kCallerSavedV) {
DCHECK(!AreAliased(scratch, object, address));
// The SaveCallerSaveRegisters method needs to save caller-saved
// registers, but we don't bother saving MacroAssembler scratch registers.
saved_regs_.Remove(MacroAssembler::DefaultTmpList());
saved_fp_regs_.Remove(MacroAssembler::DefaultFPTmpList());
// We would like to require more scratch registers for this stub,
// but the number of registers comes down to the ones used in
// FullCodeGen::SetVar(), which is architecture independent.
// We allocate 2 extra scratch registers that we'll save on the stack.
CPURegList pool_available = GetValidRegistersForAllocation();
CPURegList used_regs(object, address, scratch);
pool_available.Remove(used_regs);
scratch1_ = pool_available.PopLowestIndex().Reg();
scratch2_ = pool_available.PopLowestIndex().Reg();
// The scratch registers will be restored by other means so we don't need
// to save them with the other caller saved registers.
saved_regs_.Remove(scratch0_);
saved_regs_.Remove(scratch1_);
saved_regs_.Remove(scratch2_);
}
RecordWriteStub::Mode RecordWriteStub::GetMode(Code* stub) {
// Find the mode depending on the first two instructions.
Instruction* instr1 =
reinterpret_cast<Instruction*>(stub->instruction_start());
Instruction* instr2 = instr1->following();
if (instr1->IsUncondBranchImm()) {
DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
return INCREMENTAL;
}
DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
if (instr2->IsUncondBranchImm()) {
return INCREMENTAL_COMPACTION;
}
DCHECK(instr2->IsPCRelAddressing());
return STORE_BUFFER_ONLY;
}
// We patch the first two instructions of the stub back and forth between an
// adr and a branch when we start and stop incremental heap marking.
// The branch is
// b label
// The adr is
// adr xzr label
// so effectively a nop.
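// For illustration, the three states toggled by Patch() below are:
//   STORE_BUFFER_ONLY:       adr xzr, noncompacting ; adr xzr, compacting
//   INCREMENTAL:             b   noncompacting      ; adr xzr, compacting
//   INCREMENTAL_COMPACTION:  adr xzr, noncompacting ; b   compacting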
void RecordWriteStub::Patch(Code* stub, Mode mode) {
// We are going to patch the first two instructions of the stub.
PatchingAssembler patcher(stub->GetIsolate(), stub->instruction_start(), 2);
Instruction* instr1 = patcher.InstructionAt(0);
Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
// Instructions must be either 'adr' or 'b'.
DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
// Retrieve the offsets to the labels.
auto offset_to_incremental_noncompacting =
static_cast<int32_t>(instr1->ImmPCOffset());
auto offset_to_incremental_compacting =
static_cast<int32_t>(instr2->ImmPCOffset());
switch (mode) {
case STORE_BUFFER_ONLY:
DCHECK(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL_COMPACTION:
DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
patcher.adr(xzr, offset_to_incremental_noncompacting);
patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
break;
}
DCHECK(GetMode(stub) == mode);
}
void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
// We need some extra registers for this stub. They have been allocated,
// but we need to save them before using them.
regs_.Save(masm);
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
Register val = regs_.scratch0();
__ Ldr(val, MemOperand(regs_.address()));
__ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
__ JumpIfInNewSpace(regs_.object(), &dont_need_remembered_set);
// First notify the incremental marker if necessary, then update the
// remembered set.
CheckNeedsToInformIncrementalMarker(
masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm); // Restore the extra scratch registers we used.
__ RememberedSetHelper(object(), address(),
value(), // scratch1
save_fp_regs_mode());
__ Bind(&dont_need_remembered_set);
}
CheckNeedsToInformIncrementalMarker(
masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
InformIncrementalMarker(masm);
regs_.Restore(masm); // Restore the extra scratch registers we used.
__ Ret();
}
void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
Register address =
x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
DCHECK(!address.Is(regs_.object()));
DCHECK(!address.Is(x0));
__ Mov(address, regs_.address());
__ Mov(x0, regs_.object());
__ Mov(x1, address);
__ Mov(x2, ExternalReference::isolate_address(isolate()));
AllowExternalCallThatCantCauseGC scope(masm);
ExternalReference function =
ExternalReference::incremental_marking_record_write_function(
isolate());
__ CallCFunction(function, 3, 0);
regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
}
void RecordWriteStub::Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode) {
Label need_incremental;
Label need_incremental_pop_scratch;
#ifndef V8_CONCURRENT_MARKING
Label on_black;
// If the object is not black we don't have to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
regs_.Restore(masm); // Restore the extra scratch registers we used.
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object(), address(),
value(), // scratch1
save_fp_regs_mode());
} else {
__ Ret();
}
__ Bind(&on_black);
#endif
// Get the value from the slot.
Register val = regs_.scratch0();
__ Ldr(val, MemOperand(regs_.address()));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
__ CheckPageFlagClear(val, regs_.scratch1(),
MemoryChunk::kEvacuationCandidateMask,
&ensure_not_white);
__ CheckPageFlagClear(regs_.object(),
regs_.scratch1(),
MemoryChunk::kSkipEvacuationSlotsRecordingMask,
&need_incremental);
__ Bind(&ensure_not_white);
}
// We need extra registers for this, so we push the object and the address
// register temporarily.
__ Push(regs_.address(), regs_.object());
__ JumpIfWhite(val,
regs_.scratch1(), // Scratch.
regs_.object(), // Scratch.
regs_.address(), // Scratch.
regs_.scratch2(), // Scratch.
&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
regs_.Restore(masm); // Restore the extra scratch registers we used.
if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
__ RememberedSetHelper(object(), address(),
value(), // scratch1
save_fp_regs_mode());
} else {
__ Ret();
}
__ Bind(&need_incremental_pop_scratch);
__ Pop(regs_.object(), regs_.address());
__ Bind(&need_incremental);
// Fall through when we need to inform the incremental marker.
}
void RecordWriteStub::Generate(MacroAssembler* masm) {
Label skip_to_incremental_noncompacting;
Label skip_to_incremental_compacting;
// We patch these two first instructions back and forth between a nop and
// real branch when we start and stop incremental heap marking.
// Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
// are generated.
// See RecordWriteStub::Patch for details.
{
InstructionAccurateScope scope(masm, 2);
__ adr(xzr, &skip_to_incremental_noncompacting);
__ adr(xzr, &skip_to_incremental_compacting);
}
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
__ RememberedSetHelper(object(), address(),
value(), // scratch1
save_fp_regs_mode());
}
__ Ret();
__ Bind(&skip_to_incremental_noncompacting);
GenerateIncremental(masm, INCREMENTAL);
__ Bind(&skip_to_incremental_compacting);
GenerateIncremental(masm, INCREMENTAL_COMPACTION);
}
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@ -1110,21 +674,21 @@ static const unsigned int kProfileEntryHookCallSize =
void ProfileEntryHookStub::MaybeCallEntryHookDelayed(TurboAssembler* tasm,
Zone* zone) {
if (tasm->isolate()->function_entry_hook() != NULL) {
if (tasm->isolate()->function_entry_hook() != nullptr) {
Assembler::BlockConstPoolScope no_const_pools(tasm);
DontEmitDebugCodeScope no_debug_code(tasm);
Label entry_hook_call_start;
tasm->Bind(&entry_hook_call_start);
tasm->Push(padreg, lr);
tasm->CallStubDelayed(new (zone) ProfileEntryHookStub(nullptr));
DCHECK(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
kProfileEntryHookCallSize);
DCHECK_EQ(tasm->SizeOfCodeGeneratedSince(&entry_hook_call_start),
kProfileEntryHookCallSize);
tasm->Pop(lr, padreg);
}
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
if (masm->isolate()->function_entry_hook() != nullptr) {
ProfileEntryHookStub stub(masm->isolate());
Assembler::BlockConstPoolScope no_const_pools(masm);
DontEmitDebugCodeScope no_debug_code(masm);
@ -1132,8 +696,8 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
__ Bind(&entry_hook_call_start);
__ Push(padreg, lr);
__ CallStub(&stub);
DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
kProfileEntryHookCallSize);
DCHECK_EQ(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start),
kProfileEntryHookCallSize);
__ Pop(lr, padreg);
}
}
@ -1170,7 +734,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
// The caller's return address is above the saved temporaries.
// Grab its location for the second argument to the hook.
__ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);
__ SlotAddress(x1, kNumSavedRegs);
{
// Create a dummy frame, as CallCFunction requires this.
@ -1218,164 +782,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
__ Blr(lr);
}
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
Handle<Name> name,
Register scratch0) {
DCHECK(!AreAliased(receiver, properties, scratch0));
DCHECK(name->IsUniqueName());
// If the names of the slots in the range from 1 to kProbes - 1 for the hash
// value are not equal to the name, and the kProbes-th slot is not used (its
// name is the undefined value), the hash table is guaranteed not to contain
// the property. This holds even if some slots represent deleted properties
// (their names are the hole value).
for (int i = 0; i < kInlinedProbes; i++) {
// scratch0 points to properties hash.
// Compute the masked index: (hash + i + i * i) & mask.
Register index = scratch0;
// Capacity is smi 2^n.
__ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset));
__ Sub(index, index, 1);
__ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
Register entity_name = scratch0;
// Having undefined at this place means the name is not contained.
Register tmp = index;
__ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2));
__ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
__ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done);
// Stop if we found the property.
__ Cmp(entity_name, Operand(name));
__ B(eq, miss);
Label good;
__ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good);
// Check if the entry name is not a unique name.
__ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
__ Ldrb(entity_name,
FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ Bind(&good);
}
CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
spill_list.Remove(scratch0); // Scratch registers don't need to be preserved.
spill_list.Combine(lr);
spill_list.Combine(padreg); // Add padreg to make the list of even length.
DCHECK_EQ(spill_list.Count() % 2, 0);
__ PushCPURegList(spill_list);
__ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOrHashOffset));
__ Mov(x1, Operand(name));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
// Move stub return value to scratch0. Note that scratch0 is not included in
// spill_list and won't be clobbered by PopCPURegList.
__ Mov(scratch0, x0);
__ PopCPURegList(spill_list);
__ Cbz(scratch0, done);
__ B(miss);
}
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That means
// we cannot call anything that could cause a GC from this stub.
//
// Arguments are in x0 and x1:
// x0: property dictionary.
// x1: the name of the property we are looking for.
//
// Return value is in x0 and is zero if the lookup failed, non-zero otherwise.
// If the lookup is successful, x2 will contain the index of the entry.
Register result = x0;
Register dictionary = x0;
Register key = x1;
Register index = x2;
Register mask = x3;
Register hash = x4;
Register undefined = x5;
Register entry_key = x6;
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
__ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset));
__ Sub(mask, mask, 1);
__ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
for (int i = kInlinedProbes; i < kTotalProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
// Capacity is smi 2^n.
if (i > 0) {
// Add the probe offset (i + i * i), left shifted, to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted by the And instruction that follows.
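// Illustrative restatement of the arithmetic below:
//   index = ((hash >> Name::kHashShift) + GetProbeOffset(i)) & mask
// is computed as
//   ((hash + (GetProbeOffset(i) << Name::kHashShift)) >> Name::kHashShift) & mask
// so the hash only needs to be shifted once, by the And below.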
DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Add(index, hash,
NameDictionary::GetProbeOffset(i) << Name::kHashShift);
} else {
__ Mov(index, hash);
}
__ And(index, mask, Operand(index, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(index, index, Operand(index, LSL, 1)); // index *= 3.
__ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
__ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
// Having undefined at this place means the name is not contained.
__ Cmp(entry_key, undefined);
__ B(eq, &not_in_dictionary);
// Stop if we found the property.
__ Cmp(entry_key, key);
__ B(eq, &in_dictionary);
if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
__ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
__ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
__ Bind(&maybe_in_dictionary);
// If we are doing negative lookup then probing failure should be
// treated as a lookup success. For positive lookup, probing failure
// should be treated as lookup failure.
if (mode() == POSITIVE_LOOKUP) {
__ Mov(result, 0);
__ Ret();
}
__ Bind(&in_dictionary);
__ Mov(result, 1);
__ Ret();
__ Bind(&not_in_dictionary);
__ Mov(result, 0);
__ Ret();
}
template<class T>
static void CreateArrayDispatch(MacroAssembler* masm,
AllocationSiteOverrideMode mode) {
@ -1562,7 +968,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
// Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
@ -1571,7 +977,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// We should either have undefined in the allocation_site register or a
// valid AllocationSite.
__ AssertUndefinedOrAllocationSite(allocation_site, x10);
__ AssertUndefinedOrAllocationSite(allocation_site);
}
// Enter the context of the Array function.
@ -1659,7 +1065,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ Ldr(x10, FieldMemOperand(constructor,
JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
// Will both indicate a nullptr and a Smi.
__ JumpIfSmi(x10, &unexpected_map);
__ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok);
__ Bind(&unexpected_map);
@ -1710,8 +1116,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
Register function_address,
ExternalReference thunk_ref,
int stack_space, int spill_offset,
MemOperand return_value_operand,
MemOperand* context_restore_operand) {
MemOperand return_value_operand) {
ASM_LOCATION("CallApiFunctionAndReturn");
Isolate* isolate = masm->isolate();
ExternalReference next_address =
@ -1813,12 +1218,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ Peek(x21, (spill_offset + 2) * kXRegSize);
__ Peek(x22, (spill_offset + 3) * kXRegSize);
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
__ Ldr(cp, *context_restore_operand);
}
__ LeaveExitFrame(false, x1, !restore_context);
__ LeaveExitFrame(false, x1);
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
@ -1848,7 +1248,6 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x0 : callee
// -- x4 : call_data
// -- x2 : holder
// -- x1 : api_function_address
@ -1858,21 +1257,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// -- ...
// -- sp[(argc - 1) * 8] : first argument
// -- sp[argc * 8] : receiver
// -- sp[(argc + 1) * 8] : accessor_holder
// -----------------------------------
Register callee = x0;
Register call_data = x4;
Register holder = x2;
Register api_function_address = x1;
Register context = cp;
typedef FunctionCallbackArguments FCA;
STATIC_ASSERT(FCA::kArgsLength == 8);
STATIC_ASSERT(FCA::kNewTargetIndex == 7);
STATIC_ASSERT(FCA::kContextSaveIndex == 6);
STATIC_ASSERT(FCA::kCalleeIndex == 5);
STATIC_ASSERT(FCA::kArgsLength == 6);
STATIC_ASSERT(FCA::kNewTargetIndex == 5);
STATIC_ASSERT(FCA::kDataIndex == 4);
STATIC_ASSERT(FCA::kReturnValueOffset == 3);
STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
@ -1882,8 +1276,8 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
Register undef = x7;
__ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
// Push new target, context, callee and call data.
__ Push(undef, context, callee, call_data);
// Push new target, call data.
__ Push(undef, call_data);
Register isolate_reg = x5;
__ Mov(isolate_reg, ExternalReference::isolate_address(masm->isolate()));
@ -1892,40 +1286,6 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// return value, return value default, isolate, holder.
__ Push(undef, undef, isolate_reg, holder);
// Enter a new context.
if (is_lazy()) {
// ----------- S t a t e -------------------------------------
// -- sp[0] : holder
// -- ...
// -- sp[(FCA::kArgsLength - 1) * 8] : new_target
// -- sp[FCA::kArgsLength * 8] : last argument
// -- ...
// -- sp[(FCA::kArgsLength + argc - 1) * 8] : first argument
// -- sp[(FCA::kArgsLength + argc) * 8] : receiver
// -- sp[(FCA::kArgsLength + argc + 1) * 8] : accessor_holder
// -----------------------------------------------------------
// Load context from accessor_holder.
Register accessor_holder = context;
Register scratch = undef;
Register scratch2 = callee;
__ Ldr(accessor_holder,
MemOperand(__ StackPointer(),
(FCA::kArgsLength + 1 + argc()) * kPointerSize));
// Look for the constructor if |accessor_holder| is not a function.
Label skip_looking_for_constructor;
__ Ldr(scratch, FieldMemOperand(accessor_holder, HeapObject::kMapOffset));
__ Ldrb(scratch2, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ Tst(scratch2, Operand(1 << Map::kIsConstructor));
__ B(ne, &skip_looking_for_constructor);
__ GetMapConstructor(context, scratch, scratch, scratch2);
__ Bind(&skip_looking_for_constructor);
__ Ldr(context, FieldMemOperand(context, JSFunction::kContextOffset));
} else {
// Load context from callee.
__ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
// Prepare arguments.
Register args = x6;
__ Mov(args, masm->StackPointer());
@ -1944,7 +1304,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
DCHECK(!AreAliased(x0, api_function_address));
// x0 = FunctionCallbackInfo&
// Arguments is after the return address.
__ Add(x0, masm->StackPointer(), 1 * kPointerSize);
__ SlotAddress(x0, 1);
// FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_
__ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
__ Stp(args, x10, MemOperand(x0, 0 * kPointerSize));
@ -1956,25 +1316,19 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
ExternalReference::invoke_function_callback(masm->isolate());
AllowExternalCallThatCantCauseGC scope(masm);
MemOperand context_restore_operand(
fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
// Stores return the first js argument
int return_value_offset = 0;
if (is_store()) {
return_value_offset = 2 + FCA::kArgsLength;
} else {
return_value_offset = 2 + FCA::kReturnValueOffset;
}
int return_value_offset = 2 + FCA::kReturnValueOffset;
MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
// The number of arguments might be odd, but will be padded when calling the
// stub. We do not round up stack_space here, this will be done in
// CallApiFunctionAndReturn.
const int stack_space = argc() + FCA::kArgsLength + 2;
DCHECK_EQ((stack_space - argc()) % 2, 0);
// stub. We do not round up stack_space to account for odd argc here, this
// will be done in CallApiFunctionAndReturn.
const int stack_space = (argc() + 1) + FCA::kArgsLength;
// The current frame needs to be aligned.
DCHECK_EQ((stack_space - (argc() + 1)) % 2, 0);
const int spill_offset = 1 + kApiStackSpace;
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
spill_offset, return_value_operand,
&context_restore_operand);
spill_offset, return_value_operand);
}
@ -2033,7 +1387,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Create v8::PropertyCallbackInfo object on the stack and initialize
// it's args_ field.
__ Poke(x1, 1 * kPointerSize);
__ Add(x1, masm->StackPointer(), 1 * kPointerSize);
__ SlotAddress(x1, 1);
// x1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
@ -2051,7 +1405,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
kStackUnwindSpace, spill_offset,
return_value_operand, NULL);
return_value_operand);
}
#undef __

View File

@ -8,196 +8,6 @@
namespace v8 {
namespace internal {
class StringHelper : public AllStatic {
public:
// Compares two flat one-byte strings and returns result in x0.
static void GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3, Register scratch4);
// Compare two flat one-byte strings for equality and returns result in x0.
static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register left, Register right,
Register scratch1,
Register scratch2,
Register scratch3);
private:
static void GenerateOneByteCharsCompareLoop(
MacroAssembler* masm, Register left, Register right, Register length,
Register scratch1, Register scratch2, Label* chars_not_equal);
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
class RecordWriteStub: public PlatformCodeStub {
public:
// Stub to record the write of 'value' at 'address' in 'object'.
// Typically 'address' = 'object' + <some offset>.
// See MacroAssembler::RecordWriteField() for example.
RecordWriteStub(Isolate* isolate,
Register object,
Register value,
Register address,
RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate),
regs_(object, // An input reg.
address, // An input reg.
value) { // One scratch reg.
DCHECK(object.Is64Bits());
DCHECK(value.Is64Bits());
DCHECK(address.Is64Bits());
minor_key_ = ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(remembered_set_action) |
SaveFPRegsModeBits::encode(fp_mode);
}
RecordWriteStub(uint32_t key, Isolate* isolate)
: PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
enum Mode {
STORE_BUFFER_ONLY,
INCREMENTAL,
INCREMENTAL_COMPACTION
};
bool SometimesSetsUpAFrame() override { return false; }
static Mode GetMode(Code* stub);
static void Patch(Code* stub, Mode mode);
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
private:
// This is a helper class to manage the registers associated with the stub.
// The 'object' and 'address' registers must be preserved.
class RegisterAllocation {
public:
RegisterAllocation(Register object, Register address, Register scratch);
void Save(MacroAssembler* masm) {
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
masm->Push(scratch1_, scratch2_);
}
void Restore(MacroAssembler* masm) {
masm->Pop(scratch2_, scratch1_);
}
// If we have to call into C then we need to save and restore all caller-
// saved registers that were not already preserved.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
// TODO(all): This can be very expensive, and it is likely that not every
// register will need to be preserved. Can we improve this?
masm->PushCPURegList(saved_regs_);
if (mode == kSaveFPRegs) {
masm->PushCPURegList(saved_fp_regs_);
}
}
void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) {
// TODO(all): This can be very expensive, and it is likely that not every
// register will need to be preserved. Can we improve this?
if (mode == kSaveFPRegs) {
masm->PopCPURegList(saved_fp_regs_);
}
masm->PopCPURegList(saved_regs_);
}
Register object() { return object_; }
Register address() { return address_; }
Register scratch0() { return scratch0_; }
Register scratch1() { return scratch1_; }
Register scratch2() { return scratch2_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_ = NoReg;
Register scratch2_ = NoReg;
CPURegList saved_regs_;
CPURegList saved_fp_regs_;
// TODO(all): We should consider moving this somewhere else.
static CPURegList GetValidRegistersForAllocation() {
// The list of valid registers for allocation is defined as all the
// registers without those with a special meaning.
//
// The default list excludes registers x26 to x31 because they are
// reserved for the following purpose:
// - x26 root register
// - x27 context pointer register
// - x28 jssp
// - x29 frame pointer
// - x30 link register(lr)
// - x31 xzr/stack pointer
CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0, 25);
// We also remove MacroAssembler's scratch registers.
list.Remove(MacroAssembler::DefaultTmpList());
return list;
}
friend class RecordWriteStub;
};
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
inline Major MajorKey() const final { return RecordWrite; }
void Generate(MacroAssembler* masm) override;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode);
void InformIncrementalMarker(MacroAssembler* masm);
void Activate(Code* code) override;
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
}
Register value() const {
return Register::from_code(ValueBits::decode(minor_key_));
}
Register address() const {
return Register::from_code(AddressBits::decode(minor_key_));
}
RememberedSetAction remembered_set_action() const {
return RememberedSetActionBits::decode(minor_key_);
}
SaveFPRegsMode save_fp_regs_mode() const {
return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 5> {};
class ValueBits: public BitField<int, 5, 5> {};
class AddressBits: public BitField<int, 10, 5> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
Label slow_;
RegisterAllocation regs_;
};
// Helper to call C++ functions from generated code. The caller must prepare
// the exit frame before doing the call with GenerateCall.
class DirectCEntryStub: public PlatformCodeStub {
@ -206,52 +16,12 @@ class DirectCEntryStub: public PlatformCodeStub {
void GenerateCall(MacroAssembler* masm, Register target);
private:
bool NeedsImmovableCode() override { return true; }
Movability NeedsImmovableCode() override { return kImmovable; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
};
class NameDictionaryLookupStub: public PlatformCodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
: PlatformCodeStub(isolate) {
minor_key_ = LookupModeBits::encode(mode);
}
static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
Handle<Name> name,
Register scratch0);
bool SometimesSetsUpAFrame() override { return false; }
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
static const int kCapacityOffset =
NameDictionary::kHeaderSize +
NameDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
};
} // namespace internal
} // namespace v8

View File

@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/arm64/codegen-arm64.h"
#if V8_TARGET_ARCH_ARM64
#include "src/arm64/assembler-arm64-inl.h"
@ -21,100 +19,6 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
}
// -------------------------------------------------------------------------
// Code generators
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime) {
DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
Label indirect_string_loaded;
__ Bind(&indirect_string_loaded);
// Fetch the instance type of the receiver into result register.
__ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
__ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string, thin_string;
__ And(result, result, kStringRepresentationMask);
__ Cmp(result, kConsStringTag);
__ B(eq, &cons_string);
__ Cmp(result, kThinStringTag);
__ B(eq, &thin_string);
// Handle slices.
__ Ldr(result.W(),
UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
__ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ Add(index, index, result.W());
__ B(&indirect_string_loaded);
// Handle thin strings.
__ Bind(&thin_string);
__ Ldr(string, FieldMemOperand(string, ThinString::kActualOffset));
__ B(&indirect_string_loaded);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
__ Bind(&cons_string);
__ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
__ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
// Get the first of the two strings and load its instance type.
__ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
__ B(&indirect_string_loaded);
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
// reduced to the underlying sequential or external string).
Label external_string, check_encoding;
__ Bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
__ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
// Prepare sequential strings
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
__ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
__ B(&check_encoding);
// Handle external strings.
__ Bind(&external_string);
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
__ Tst(result, kIsIndirectStringMask);
__ Assert(eq, kExternalStringExpectedButNotFound);
}
// Rule out short external strings.
STATIC_ASSERT(kShortExternalStringTag != 0);
// TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
// can be bound far away in deferred code.
__ Tst(result, kShortExternalStringMask);
__ B(ne, call_runtime);
__ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label one_byte, done;
__ Bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
__ TestAndBranchIfAnySet(result, kStringEncodingMask, &one_byte);
// Two-byte string.
__ Ldrh(result, MemOperand(string, index, SXTW, 1));
__ B(&done);
__ Bind(&one_byte);
// One-byte string.
__ Ldrb(result, MemOperand(string, index, SXTW));
__ Bind(&done);
}
#undef __
} // namespace internal

View File

@ -1,32 +0,0 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_CODEGEN_ARM64_H_
#define V8_ARM64_CODEGEN_ARM64_H_
#include "src/macro-assembler.h"
namespace v8 {
namespace internal {
class StringCharLoadGenerator : public AllStatic {
public:
// Generates the code for handling different string types and loading the
// indexed character into |result|. We expect |index| as untagged input and
// |result| as untagged output. Register index is asserted to be a 32-bit W
// register.
static void Generate(MacroAssembler* masm,
Register string,
Register index,
Register result,
Label* call_runtime);
private:
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
} // namespace internal
} // namespace v8
#endif // V8_ARM64_CODEGEN_ARM64_H_

View File

@ -49,8 +49,8 @@ void CpuFeatures::FlushICache(void* address, size_t length) {
uintptr_t dsize = sizes.dcache_line_size();
uintptr_t isize = sizes.icache_line_size();
// Cache line sizes are always a power of 2.
DCHECK(CountSetBits(dsize, 64) == 1);
DCHECK(CountSetBits(isize, 64) == 1);
DCHECK_EQ(CountSetBits(dsize, 64), 1);
DCHECK_EQ(CountSetBits(isize, 64), 1);
uintptr_t dstart = start & ~(dsize - 1);
uintptr_t istart = start & ~(isize - 1);
uintptr_t end = start + length;

View File

@ -96,10 +96,10 @@ void Decoder<V>::Decode(Instruction *instr) {
template<typename V>
void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
DCHECK(instr->Bits(27, 24) == 0x0);
DCHECK_EQ(0x0, instr->Bits(27, 24));
// We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
// decode.
DCHECK(instr->Bit(28) == 0x1);
DCHECK_EQ(0x1, instr->Bit(28));
V::VisitPCRelAddressing(instr);
}
@ -339,7 +339,7 @@ void Decoder<V>::DecodeLoadStore(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeLogical(Instruction* instr) {
DCHECK(instr->Bits(27, 24) == 0x2);
DCHECK_EQ(0x2, instr->Bits(27, 24));
if (instr->Mask(0x80400000) == 0x00400000) {
V::VisitUnallocated(instr);
@ -359,7 +359,7 @@ void Decoder<V>::DecodeLogical(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
DCHECK(instr->Bits(27, 24) == 0x3);
DCHECK_EQ(0x3, instr->Bits(27, 24));
if ((instr->Mask(0x80400000) == 0x80000000) ||
(instr->Mask(0x80400000) == 0x00400000) ||
@ -385,7 +385,7 @@ void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
template<typename V>
void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
DCHECK(instr->Bits(27, 24) == 0x1);
DCHECK_EQ(0x1, instr->Bits(27, 24));
if (instr->Bit(23) == 1) {
V::VisitUnallocated(instr);
} else {
@ -623,7 +623,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
}
} else {
// Bit 30 == 1 has been handled earlier.
DCHECK(instr->Bit(30) == 0);
DCHECK_EQ(0, instr->Bit(30));
if (instr->Mask(0xA0800000) != 0) {
V::VisitUnallocated(instr);
} else {
@ -639,7 +639,7 @@ void Decoder<V>::DecodeFP(Instruction* instr) {
template <typename V>
void Decoder<V>::DecodeNEONLoadStore(Instruction* instr) {
DCHECK(instr->Bits(29, 25) == 0x6);
DCHECK_EQ(0x6, instr->Bits(29, 25));
if (instr->Bit(31) == 0) {
if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
V::VisitUnallocated(instr);
@ -670,7 +670,7 @@ void Decoder<V>::DecodeNEONLoadStore(Instruction* instr) {
template <typename V>
void Decoder<V>::DecodeNEONVectorDataProcessing(Instruction* instr) {
DCHECK(instr->Bits(28, 25) == 0x7);
DCHECK_EQ(0x7, instr->Bits(28, 25));
if (instr->Bit(31) == 0) {
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
@ -748,7 +748,7 @@ void Decoder<V>::DecodeNEONVectorDataProcessing(Instruction* instr) {
template <typename V>
void Decoder<V>::DecodeNEONScalarDataProcessing(Instruction* instr) {
DCHECK(instr->Bits(28, 25) == 0xF);
DCHECK_EQ(0xF, instr->Bits(28, 25));
if (instr->Bit(24) == 0) {
if (instr->Bit(21) == 0) {
if (instr->Bit(15) == 0) {

View File

@ -5,7 +5,6 @@
#include "src/api.h"
#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/codegen.h"
#include "src/deoptimizer.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
@ -17,6 +16,77 @@ namespace internal {
#define __ masm()->
namespace {
void CopyRegListToFrame(MacroAssembler* masm, const Register& dst,
int dst_offset, const CPURegList& reg_list,
const Register& temp0, const Register& temp1,
int src_offset = 0) {
DCHECK_EQ(reg_list.Count() % 2, 0);
UseScratchRegisterScope temps(masm);
CPURegList copy_to_input = reg_list;
int reg_size = reg_list.RegisterSizeInBytes();
DCHECK_EQ(temp0.SizeInBytes(), reg_size);
DCHECK_EQ(temp1.SizeInBytes(), reg_size);
// Compute some temporary addresses to avoid having the macro assembler set
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
masm->Add(src, masm->StackPointer(), src_offset);
masm->Add(dst, dst, dst_offset);
// Write reg_list into the frame pointed to by dst.
for (int i = 0; i < reg_list.Count(); i += 2) {
masm->Ldp(temp0, temp1, MemOperand(src, i * reg_size));
CPURegister reg0 = copy_to_input.PopLowestIndex();
CPURegister reg1 = copy_to_input.PopLowestIndex();
int offset0 = reg0.code() * reg_size;
int offset1 = reg1.code() * reg_size;
// Pair up adjacent stores, otherwise write them separately.
if (offset1 == offset0 + reg_size) {
masm->Stp(temp0, temp1, MemOperand(dst, offset0));
} else {
masm->Str(temp0, MemOperand(dst, offset0));
masm->Str(temp1, MemOperand(dst, offset1));
}
}
masm->Sub(dst, dst, dst_offset);
}
void RestoreRegList(MacroAssembler* masm, const CPURegList& reg_list,
const Register& src_base, int src_offset) {
DCHECK_EQ(reg_list.Count() % 2, 0);
UseScratchRegisterScope temps(masm);
CPURegList restore_list = reg_list;
int reg_size = restore_list.RegisterSizeInBytes();
// Compute a temporary address to avoid having the macro assembler set
// up a temp with an offset for accesses out of the range of the addressing
// mode.
Register src = temps.AcquireX();
masm->Add(src, src_base, src_offset);
// Restore every register in restore_list from src.
while (!restore_list.IsEmpty()) {
CPURegister reg0 = restore_list.PopLowestIndex();
CPURegister reg1 = restore_list.PopLowestIndex();
int offset0 = reg0.code() * reg_size;
int offset1 = reg1.code() * reg_size;
// Pair up adjacent loads, otherwise read them separately.
if (offset1 == offset0 + reg_size) {
masm->Ldp(reg0, reg1, MemOperand(src, offset0));
} else {
masm->Ldr(reg0, MemOperand(src, offset0));
masm->Ldr(reg1, MemOperand(src, offset1));
}
}
}
} // namespace
void Deoptimizer::TableEntryGenerator::Generate() {
GeneratePrologue();
@ -28,17 +98,23 @@ void Deoptimizer::TableEntryGenerator::Generate() {
CPURegList saved_double_registers(
CPURegister::kVRegister, kDRegSizeInBits,
RegisterConfiguration::Default()->allocatable_double_codes_mask());
DCHECK_EQ(saved_double_registers.Count() % 2, 0);
__ PushCPURegList(saved_double_registers);
// Save all allocatable float registers.
CPURegList saved_float_registers(
CPURegister::kVRegister, kSRegSizeInBits,
RegisterConfiguration::Default()->allocatable_float_codes_mask());
DCHECK_EQ(saved_float_registers.Count() % 4, 0);
__ PushCPURegList(saved_float_registers);
// We save all the registers expcept jssp, sp and lr.
// We save all the registers except sp, lr and the masm scratches.
CPURegList saved_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 27);
saved_registers.Remove(ip0);
saved_registers.Remove(ip1);
// TODO(arm): padding here can be replaced with jssp/x28 when allocatable.
saved_registers.Combine(padreg);
saved_registers.Combine(fp);
DCHECK_EQ(saved_registers.Count() % 2, 0);
__ PushCPURegList(saved_registers);
__ Mov(x3, Operand(ExternalReference(IsolateAddressId::kCEntryFPAddress,
@ -64,18 +140,24 @@ void Deoptimizer::TableEntryGenerator::Generate() {
// Get the address of the location in the code object. This is the return
// address for lazy deoptimization.
__ Mov(code_object, lr);
// Compute the fp-to-sp delta, and correct one word for bailout id.
// Compute the fp-to-sp delta, adding two words for alignment padding and
// bailout id.
__ Add(fp_to_sp, __ StackPointer(),
kSavedRegistersAreaSize + (1 * kPointerSize));
kSavedRegistersAreaSize + (2 * kPointerSize));
__ Sub(fp_to_sp, fp, fp_to_sp);
// Allocate a new deoptimizer object.
__ Mov(x0, 0);
Label context_check;
__ Ldr(x1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
__ JumpIfSmi(x1, &context_check);
// Ensure we can safely load from below fp.
DCHECK_GT(kSavedRegistersAreaSize,
-JavaScriptFrameConstants::kFunctionOffset);
__ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ bind(&context_check);
// If x1 is a smi, zero x0.
__ Tst(x1, kSmiTagMask);
__ CzeroX(x0, eq);
__ Mov(x1, type());
// Following arguments are already loaded:
// - x2: bailout id
@ -96,70 +178,47 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
// Copy core registers into the input frame.
CPURegList copy_to_input = saved_registers;
for (int i = 0; i < saved_registers.Count(); i++) {
__ Peek(x2, i * kPointerSize);
CPURegister current_reg = copy_to_input.PopLowestIndex();
int offset = (current_reg.code() * kPointerSize) +
FrameDescription::registers_offset();
__ Str(x2, MemOperand(x1, offset));
}
CopyRegListToFrame(masm(), x1, FrameDescription::registers_offset(),
saved_registers, x2, x3);
// Copy double registers to the input frame.
CPURegList copy_double_to_input = saved_double_registers;
for (int i = 0; i < saved_double_registers.Count(); i++) {
int src_offset = kDoubleRegistersOffset + (i * kDoubleSize);
__ Peek(x2, src_offset);
CPURegister reg = copy_double_to_input.PopLowestIndex();
int dst_offset = FrameDescription::double_registers_offset() +
(reg.code() * kDoubleSize);
__ Str(x2, MemOperand(x1, dst_offset));
}
CopyRegListToFrame(masm(), x1, FrameDescription::double_registers_offset(),
saved_double_registers, x2, x3, kDoubleRegistersOffset);
// Copy float registers to the input frame.
CPURegList copy_float_to_input = saved_float_registers;
for (int i = 0; i < saved_float_registers.Count(); i++) {
int src_offset = kFloatRegistersOffset + (i * kFloatSize);
__ Peek(w2, src_offset);
CPURegister reg = copy_float_to_input.PopLowestIndex();
int dst_offset =
FrameDescription::float_registers_offset() + (reg.code() * kFloatSize);
__ Str(w2, MemOperand(x1, dst_offset));
}
// TODO(arm): these are the lower 32 bits of the double registers stored
// above, so we shouldn't need to store them again.
CopyRegListToFrame(masm(), x1, FrameDescription::float_registers_offset(),
saved_float_registers, w2, w3, kFloatRegistersOffset);
// Remove the bailout id and the saved registers from the stack.
__ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
// Remove the padding, bailout id and the saved registers from the stack.
DCHECK_EQ(kSavedRegistersAreaSize % kXRegSize, 0);
__ Drop(2 + (kSavedRegistersAreaSize / kXRegSize));
// Compute a pointer to the unwinding limit in register x2; that is
// the first stack slot not part of the input frame.
Register unwind_limit = x2;
__ Ldr(unwind_limit, MemOperand(x1, FrameDescription::frame_size_offset()));
__ Add(unwind_limit, unwind_limit, __ StackPointer());
// Unwind the stack down to - but not including - the unwinding
// limit and copy the contents of the activation frame to the input
// frame description.
__ Add(x3, x1, FrameDescription::frame_content_offset());
Label pop_loop;
Label pop_loop_header;
__ B(&pop_loop_header);
__ Bind(&pop_loop);
__ Pop(x4);
__ Str(x4, MemOperand(x3, kPointerSize, PostIndex));
__ Bind(&pop_loop_header);
__ Cmp(unwind_limit, __ StackPointer());
__ B(ne, &pop_loop);
__ SlotAddress(x1, 0);
__ Lsr(unwind_limit, unwind_limit, kPointerSizeLog2);
__ Mov(x5, unwind_limit);
__ CopyDoubleWords(x3, x1, x5);
__ Drop(unwind_limit);
// Compute the output frame in the deoptimizer.
__ Push(x0); // Preserve deoptimizer object across call.
__ Push(padreg, x0); // Preserve deoptimizer object across call.
{
// Call Deoptimizer::ComputeOutputFrames().
AllowExternalCallThatCantCauseGC scope(masm());
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate()), 1);
}
__ Pop(x4); // Restore deoptimizer object (class Deoptimizer).
__ Pop(x4, padreg); // Restore deoptimizer object (class Deoptimizer).
__ Ldr(__ StackPointer(),
MemOperand(x4, Deoptimizer::caller_frame_top_offset()));
@ -174,43 +233,29 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Bind(&outer_push_loop);
Register current_frame = x2;
__ Ldr(current_frame, MemOperand(x0, 0));
Register frame_size = x3;
__ Ldr(current_frame, MemOperand(x0, kPointerSize, PostIndex));
__ Ldr(x3, MemOperand(current_frame, FrameDescription::frame_size_offset()));
__ B(&inner_loop_header);
__ Lsr(frame_size, x3, kPointerSizeLog2);
__ Claim(frame_size);
__ Bind(&inner_push_loop);
__ Sub(x3, x3, kPointerSize);
__ Add(x6, current_frame, x3);
__ Ldr(x7, MemOperand(x6, FrameDescription::frame_content_offset()));
__ Push(x7);
__ Bind(&inner_loop_header);
__ Cbnz(x3, &inner_push_loop);
__ Add(x7, current_frame, FrameDescription::frame_content_offset());
__ SlotAddress(x6, 0);
__ CopyDoubleWords(x6, x7, frame_size);
__ Add(x0, x0, kPointerSize);
__ Bind(&outer_loop_header);
__ Cmp(x0, x1);
__ B(lt, &outer_push_loop);
__ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
DCHECK(!saved_double_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_double_registers.IncludesAliasOf(fp_zero) &&
!saved_double_registers.IncludesAliasOf(fp_scratch));
while (!saved_double_registers.IsEmpty()) {
const CPURegister reg = saved_double_registers.PopLowestIndex();
int src_offset = FrameDescription::double_registers_offset() +
(reg.code() * kDoubleSize);
__ Ldr(reg, MemOperand(x1, src_offset));
}
RestoreRegList(masm(), saved_double_registers, x1,
FrameDescription::double_registers_offset());
// TODO(all): ARM copies a lot (if not all) of the last output frame onto the
// stack, then pops it all into registers. Here, we try to load it directly
// into the relevant registers. Is this correct? If so, we should improve the
// ARM code.
// TODO(all): This code needs to be revisited. We probably don't need to
// restore all the registers as fullcodegen does not keep live values in
// registers (note that at least fp must be restored though).
// Restore registers from the last output frame.
// Note that lr is not in the list of saved_registers and will be restored
// later. We can use it to hold the address of last output frame while
@ -219,19 +264,10 @@ void Deoptimizer::TableEntryGenerator::Generate() {
Register last_output_frame = lr;
__ Mov(last_output_frame, current_frame);
// We don't need to restore x7 as it will be clobbered later to hold the
// continuation address.
RestoreRegList(masm(), saved_registers, last_output_frame,
FrameDescription::registers_offset());
Register continuation = x7;
saved_registers.Remove(continuation);
while (!saved_registers.IsEmpty()) {
// TODO(all): Look for opportunities to optimize this by using ldp.
CPURegister current_reg = saved_registers.PopLowestIndex();
int offset = (current_reg.code() * kPointerSize) +
FrameDescription::registers_offset();
__ Ldr(current_reg, MemOperand(last_output_frame, offset));
}
__ Ldr(continuation, MemOperand(last_output_frame,
FrameDescription::continuation_offset()));
__ Ldr(lr, MemOperand(last_output_frame, FrameDescription::pc_offset()));
@ -239,37 +275,57 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ Br(continuation);
}
// Size of an entry of the second level deopt table.
// This is the code size generated by GeneratePrologue for one entry.
const int Deoptimizer::table_entry_size_ = 2 * kInstructionSize;
const int Deoptimizer::table_entry_size_ = kInstructionSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
UseScratchRegisterScope temps(masm());
// The address at which the deopt table is entered should be in x16, the first
// temp register allocated. We can't assert that the address is in there, but
// we can check that it's the first allocated temp. Later, we'll also check
// the computed entry_id is in the expected range.
Register entry_addr = temps.AcquireX();
Register entry_id = temps.AcquireX();
DCHECK(entry_addr.Is(x16));
DCHECK(entry_id.Is(x17));
// Create a sequence of deoptimization entries.
// Note that registers are still live when jumping to an entry.
Label done;
{
InstructionAccurateScope scope(masm());
// The number of entries will never exceed kMaxNumberOfEntries.
// As long as kMaxNumberOfEntries is a valid 16-bit immediate, we can use
// a movz instruction to load the entry id.
DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries));
Label start_of_table, end_of_table;
__ bind(&start_of_table);
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
__ movz(entry_id, i);
__ b(&done);
__ b(&end_of_table);
DCHECK(masm()->pc_offset() - start == table_entry_size_);
}
__ bind(&end_of_table);
// Get the address of the start of the table.
DCHECK(is_int21(table_entry_size_ * count()));
__ adr(entry_id, &start_of_table);
// Compute the gap in bytes between the entry address, which should have
// been left in entry_addr (x16) by CallForDeoptimization, and the start of
// the table.
__ sub(entry_id, entry_addr, entry_id);
// Shift down to obtain the entry_id.
DCHECK_EQ(table_entry_size_, kInstructionSize);
__ lsr(entry_id, entry_id, kInstructionSizeLog2);
}
__ Push(padreg, entry_id);
if (__ emit_debug_code()) {
// Ensure the entry_id looks sensible, ie. 0 <= entry_id < count().
__ Cmp(entry_id, count());
__ Check(lo, kOffsetOutOfRange);
}
__ Bind(&done);
__ Push(entry_id);
}
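// Standalone sketch (not part of this diff) of the entry-id computation above,
// assuming the AArch64 instruction size of 4 bytes and one branch instruction
// per table entry, as the new prologue emits. Names here are illustrative.
#include <cstdint>

constexpr uint64_t kSketchInstructionSize = 4;      // assumed kInstructionSize
constexpr uint64_t kSketchInstructionSizeLog2 = 2;  // assumed kInstructionSizeLog2

uint64_t EntryIdFromAddress(uint64_t entry_addr, uint64_t table_start) {
  // The byte gap between the entered address (left in x16 by
  // CallForDeoptimization) and the start of the table, divided by the entry
  // size, yields the entry index - the sub/lsr pair emitted above.
  return (entry_addr - table_start) >> kSketchInstructionSizeLog2;
}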
bool Deoptimizer::PadTopOfStackRegister() { return true; }


@ -547,7 +547,7 @@ void DisassemblingDecoder::VisitUnconditionalBranchToRegister(
case RET: {
mnemonic = "ret";
if (instr->Rn() == kLinkRegCode) {
form = NULL;
form = nullptr;
}
break;
}
@ -1244,7 +1244,7 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
switch (instr->ImmHint()) {
case NOP: {
mnemonic = "nop";
form = NULL;
form = nullptr;
break;
}
}
@ -1262,7 +1262,7 @@ void DisassemblingDecoder::VisitSystem(Instruction* instr) {
}
case ISB: {
mnemonic = "isb";
form = NULL;
form = nullptr;
break;
}
}
@ -1334,8 +1334,8 @@ void DisassemblingDecoder::VisitNEON3Same(Instruction* instr) {
"shadd", "uhadd", "shadd", "uhadd",
"sqadd", "uqadd", "sqadd", "uqadd",
"srhadd", "urhadd", "srhadd", "urhadd",
NULL, NULL, NULL,
NULL, // Handled by logical cases above.
nullptr, nullptr, nullptr,
nullptr, // Handled by logical cases above.
"shsub", "uhsub", "shsub", "uhsub",
"sqsub", "uqsub", "sqsub", "uqsub",
"cmgt", "cmhi", "cmgt", "cmhi",
@ -1976,8 +1976,8 @@ void DisassemblingDecoder::VisitNEONExtract(Instruction* instr) {
}
void DisassemblingDecoder::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
const char* mnemonic = NULL;
const char* form = NULL;
const char* mnemonic = nullptr;
const char* form = nullptr;
const char* form_1v = "{'Vt.%1$s}, ['Xns]";
const char* form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]";
const char* form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]";
@ -2046,7 +2046,7 @@ void DisassemblingDecoder::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
}
// Work out unallocated encodings.
bool allocated = (mnemonic != NULL);
bool allocated = (mnemonic != nullptr);
switch (instr->Mask(NEONLoadStoreMultiStructMask)) {
case NEON_LD2:
case NEON_LD3:
@ -2073,8 +2073,8 @@ void DisassemblingDecoder::VisitNEONLoadStoreMultiStruct(Instruction* instr) {
void DisassemblingDecoder::VisitNEONLoadStoreMultiStructPostIndex(
Instruction* instr) {
const char* mnemonic = NULL;
const char* form = NULL;
const char* mnemonic = nullptr;
const char* form = nullptr;
const char* form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1";
const char* form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2";
const char* form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3";
@ -2144,7 +2144,7 @@ void DisassemblingDecoder::VisitNEONLoadStoreMultiStructPostIndex(
}
// Work out unallocated encodings.
bool allocated = (mnemonic != NULL);
bool allocated = (mnemonic != nullptr);
switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
case NEON_LD2_post:
case NEON_LD3_post:
@ -2170,8 +2170,8 @@ void DisassemblingDecoder::VisitNEONLoadStoreMultiStructPostIndex(
}
void DisassemblingDecoder::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
const char* mnemonic = NULL;
const char* form = NULL;
const char* mnemonic = nullptr;
const char* form = nullptr;
const char* form_1b = "{'Vt.b}['IVLSLane0], ['Xns]";
const char* form_1h = "{'Vt.h}['IVLSLane1], ['Xns]";
@ -2294,7 +2294,7 @@ void DisassemblingDecoder::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
}
// Work out unallocated encodings.
bool allocated = (mnemonic != NULL);
bool allocated = (mnemonic != nullptr);
switch (instr->Mask(NEONLoadStoreSingleStructMask)) {
case NEON_LD1_h:
case NEON_LD2_h:
@ -2342,8 +2342,8 @@ void DisassemblingDecoder::VisitNEONLoadStoreSingleStruct(Instruction* instr) {
void DisassemblingDecoder::VisitNEONLoadStoreSingleStructPostIndex(
Instruction* instr) {
const char* mnemonic = NULL;
const char* form = NULL;
const char* mnemonic = nullptr;
const char* form = nullptr;
const char* form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1";
const char* form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2";
@ -2455,7 +2455,7 @@ void DisassemblingDecoder::VisitNEONLoadStoreSingleStructPostIndex(
}
// Work out unallocated encodings.
bool allocated = (mnemonic != NULL);
bool allocated = (mnemonic != nullptr);
switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
case NEON_LD1_h_post:
case NEON_LD2_h_post:
@ -3355,10 +3355,10 @@ void DisassemblingDecoder::Format(Instruction* instr, const char* mnemonic,
const char* format) {
// TODO(mcapewel) don't think I can use the instr address here - there needs
// to be a base address too
DCHECK(mnemonic != NULL);
DCHECK_NOT_NULL(mnemonic);
ResetOutput();
Substitute(instr, mnemonic);
if (format != NULL) {
if (format != nullptr) {
buffer_[buffer_pos_++] = ' ';
Substitute(instr, format);
}
@ -3561,7 +3561,7 @@ int DisassemblingDecoder::SubstituteRegisterField(Instruction* instr,
int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
const char* format) {
DCHECK(format[0] == 'I');
DCHECK_EQ(format[0], 'I');
switch (format[1]) {
case 'M': { // IMoveImm or IMoveLSL.
@ -3572,7 +3572,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
if (!instr->SixtyFourBits()) imm &= UINT64_C(0xffffffff);
AppendToOutput("#0x%" PRIx64, imm);
} else {
DCHECK(format[5] == 'L');
DCHECK_EQ(format[5], 'L');
AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
if (instr->ShiftMoveWide() > 0) {
AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
@ -3617,7 +3617,7 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
return 6;
}
case 'A': { // IAddSub.
DCHECK(instr->ShiftAddSub() <= 1);
DCHECK_LE(instr->ShiftAddSub(), 1);
int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
return 7;
@ -3795,7 +3795,7 @@ int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr,
AppendToOutput("#%d", s + 1);
return 5;
} else {
DCHECK(format[3] == '-');
DCHECK_EQ(format[3], '-');
AppendToOutput("#%d", s - r + 1);
return 7;
}
@ -3816,7 +3816,7 @@ int DisassemblingDecoder::SubstituteBitfieldImmediateField(Instruction* instr,
int DisassemblingDecoder::SubstituteLiteralField(Instruction* instr,
const char* format) {
DCHECK(strncmp(format, "LValue", 6) == 0);
DCHECK_EQ(strncmp(format, "LValue", 6), 0);
USE(format);
switch (instr->Mask(LoadLiteralMask)) {
@ -3858,7 +3858,7 @@ int DisassemblingDecoder::SubstituteShiftField(Instruction* instr,
int DisassemblingDecoder::SubstituteConditionField(Instruction* instr,
const char* format) {
DCHECK(format[0] == 'C');
DCHECK_EQ(format[0], 'C');
const char* condition_code[] = { "eq", "ne", "hs", "lo",
"mi", "pl", "vs", "vc",
"hi", "ls", "ge", "lt",
@ -3880,12 +3880,12 @@ int DisassemblingDecoder::SubstituteConditionField(Instruction* instr,
int DisassemblingDecoder::SubstitutePCRelAddressField(Instruction* instr,
const char* format) {
USE(format);
DCHECK(strncmp(format, "AddrPCRel", 9) == 0);
DCHECK_EQ(strncmp(format, "AddrPCRel", 9), 0);
int offset = instr->ImmPCRel();
// Only ADR (AddrPCRelByte) is supported.
DCHECK(strcmp(format, "AddrPCRelByte") == 0);
DCHECK_EQ(strcmp(format, "AddrPCRelByte"), 0);
char sign = '+';
if (offset < 0) {
@ -3927,8 +3927,8 @@ int DisassemblingDecoder::SubstituteBranchTargetField(Instruction* instr,
int DisassemblingDecoder::SubstituteExtendField(Instruction* instr,
const char* format) {
DCHECK(strncmp(format, "Ext", 3) == 0);
DCHECK(instr->ExtendMode() <= 7);
DCHECK_EQ(strncmp(format, "Ext", 3), 0);
DCHECK_LE(instr->ExtendMode(), 7);
USE(format);
const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
@ -3954,7 +3954,7 @@ int DisassemblingDecoder::SubstituteExtendField(Instruction* instr,
int DisassemblingDecoder::SubstituteLSRegOffsetField(Instruction* instr,
const char* format) {
DCHECK(strncmp(format, "Offsetreg", 9) == 0);
DCHECK_EQ(strncmp(format, "Offsetreg", 9), 0);
const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
"undefined", "undefined", "sxtw", "sxtx" };
USE(format);
@ -3983,7 +3983,7 @@ int DisassemblingDecoder::SubstituteLSRegOffsetField(Instruction* instr,
int DisassemblingDecoder::SubstitutePrefetchField(Instruction* instr,
const char* format) {
DCHECK(format[0] == 'P');
DCHECK_EQ(format[0], 'P');
USE(format);
int prefetch_mode = instr->PrefetchMode();
@ -3998,7 +3998,7 @@ int DisassemblingDecoder::SubstitutePrefetchField(Instruction* instr,
int DisassemblingDecoder::SubstituteBarrierField(Instruction* instr,
const char* format) {
DCHECK(format[0] == 'M');
DCHECK_EQ(format[0], 'M');
USE(format);
static const char* const options[4][4] = {


@ -24,6 +24,14 @@ int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return RoundUp(register_count, 2);
}
int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
// Round the total slot count up to a multiple of two, to make the frame a
// multiple of 16 bytes.
int slot_count = kFixedSlotCount + register_count;
int rounded_slot_count = RoundUp(slot_count, 2);
return rounded_slot_count - slot_count;
}
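// Standalone sketch (not part of this diff) of the rounding above: with a
// hypothetical fixed slot count, the padding is whatever brings the total slot
// count up to the next multiple of two (one 16-byte unit on arm64).
int PaddingSlotCountSketch(int fixed_slot_count, int register_count) {
  int slot_count = fixed_slot_count + register_count;
  int rounded_slot_count = (slot_count + 1) & ~1;  // RoundUp(slot_count, 2)
  return rounded_slot_count - slot_count;          // 0 or 1
}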
} // namespace internal
} // namespace v8

View File

@ -70,7 +70,7 @@ bool Instruction::IsStore() const {
static uint64_t RotateRight(uint64_t value,
unsigned int rotate,
unsigned int width) {
DCHECK(width <= 64);
DCHECK_LE(width, 64);
rotate &= 63;
return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
(value >> rotate);
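// Worked example (illustrative): RotateRight(0x3, 1, 32) moves the low set bit
// to bit 31 of the 32-bit field, giving 0x80000001.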
@ -657,8 +657,8 @@ void NEONFormatDecoder::SetFormatMaps(const NEONFormatMap* format0,
const NEONFormatMap* format2) {
DCHECK_NOT_NULL(format0);
formats_[0] = format0;
formats_[1] = (format1 == NULL) ? formats_[0] : format1;
formats_[2] = (format2 == NULL) ? formats_[1] : format2;
formats_[1] = (format1 == nullptr) ? formats_[0] : format1;
formats_[2] = (format2 == nullptr) ? formats_[1] : format2;
}
void NEONFormatDecoder::SetFormatMap(unsigned index,


@ -551,7 +551,7 @@ const Instr kImmExceptionIsDebug = 0xdeb0;
// Parameters are inlined in the code after a debug pseudo-instruction:
// - Debug code.
// - Debug parameters.
// - Debug message string. This is a NULL-terminated ASCII string, padded to
// - Debug message string. This is a nullptr-terminated ASCII string, padded to
// kInstructionSize so that subsequent instructions are correctly aligned.
// - A kImmExceptionIsUnreachable marker, to catch accidental execution of the
// string data.
@ -642,8 +642,8 @@ class NEONFormatDecoder {
// Set the format mapping for all or individual substitutions.
void SetFormatMaps(const NEONFormatMap* format0,
const NEONFormatMap* format1 = NULL,
const NEONFormatMap* format2 = NULL);
const NEONFormatMap* format1 = nullptr,
const NEONFormatMap* format2 = nullptr);
void SetFormatMap(unsigned index, const NEONFormatMap* format);
// Substitute %s in the input string with the placeholder string for each


@ -9,7 +9,7 @@ namespace internal {
Counter::Counter(const char* name, CounterType type)
: count_(0), enabled_(false), type_(type) {
DCHECK(name != NULL);
DCHECK_NOT_NULL(name);
strncpy(name_, name, kCounterNameMaxLength);
}
@ -96,12 +96,11 @@ static const CounterDescriptor kCounterList[] = {
Instrument::Instrument(const char* datafile, uint64_t sample_period)
: output_stream_(stderr), sample_period_(sample_period) {
// Set up the output stream. If datafile is non-NULL, use that file. If it
// can't be opened, or datafile is NULL, use stderr.
if (datafile != NULL) {
// Set up the output stream. If datafile is non-nullptr, use that file. If it
// can't be opened, or datafile is nullptr, use stderr.
if (datafile != nullptr) {
output_stream_ = fopen(datafile, "w");
if (output_stream_ == NULL) {
if (output_stream_ == nullptr) {
fprintf(stderr, "Can't open output file %s. Using stderr.\n", datafile);
output_stream_ = stderr;
}


@ -52,8 +52,9 @@ class Counter {
class Instrument: public DecoderVisitor {
public:
explicit Instrument(const char* datafile = NULL,
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
explicit Instrument(
const char* datafile = nullptr,
uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
~Instrument();
// Declare all Visitor functions.


@ -58,9 +58,6 @@ const Register StoreTransitionDescriptor::SlotRegister() { return x4; }
const Register StoreTransitionDescriptor::VectorRegister() { return x3; }
const Register StoreTransitionDescriptor::MapRegister() { return x5; }
const Register StringCompareDescriptor::LeftRegister() { return x1; }
const Register StringCompareDescriptor::RightRegister() { return x0; }
const Register ApiGetterDescriptor::HolderRegister() { return x0; }
const Register ApiGetterDescriptor::CallbackRegister() { return x3; }
@ -222,7 +219,7 @@ void ArrayConstructorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// kTarget, kNewTarget, kActualArgumentsCount, kAllocationSite
Register registers[] = {x1, x3, x0, x2};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
@ -232,7 +229,7 @@ void ArrayNoArgumentConstructorDescriptor::InitializePlatformSpecific(
// x2: allocation site with elements kind
// x0: number of arguments to the constructor function
Register registers[] = {x1, x2, x0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
@ -242,7 +239,7 @@ void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
// x1: function
// x2: allocation site with elements kind
Register registers[] = {x1, x2, x0};
data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
data->InitializePlatformSpecific(arraysize(registers), registers, nullptr);
}
void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
@ -298,10 +295,10 @@ void ApiCallbackDescriptor::InitializePlatformSpecific(
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
x0, // callee
x4, // call_data
x2, // holder
x1, // api_function_address
JavaScriptFrame::context_register(), // callee context
x4, // call_data
x2, // holder
x1, // api_function_address
};
data->InitializePlatformSpecific(arraysize(registers), registers,
&default_descriptor);
@ -351,8 +348,7 @@ void ResumeGeneratorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // the value to pass to the generator
x1, // the JSGeneratorObject to resume
x2 // the resume mode (tagged)
x1 // the JSGeneratorObject to resume
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}


@ -1045,7 +1045,7 @@ void TurboAssembler::Uxtw(const Register& rd, const Register& rn) {
void MacroAssembler::AlignAndSetCSPForFrame() {
int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16);
DCHECK_GE(sp_alignment, 16);
DCHECK(base::bits::IsPowerOfTwo(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp);
@ -1173,7 +1173,7 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
JumpIfSmi(value, NULL, not_smi_label);
JumpIfSmi(value, nullptr, not_smi_label);
}
@ -1206,14 +1206,14 @@ void MacroAssembler::JumpIfEitherSmi(Register value1,
void MacroAssembler::JumpIfEitherNotSmi(Register value1,
Register value2,
Label* not_smi_label) {
JumpIfBothSmi(value1, value2, NULL, not_smi_label);
JumpIfBothSmi(value1, value2, nullptr, not_smi_label);
}
void MacroAssembler::JumpIfBothNotSmi(Register value1,
Register value2,
Label* not_smi_label) {
JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
JumpIfEitherSmi(value1, value2, nullptr, not_smi_label);
}
@ -1257,7 +1257,7 @@ void TurboAssembler::Push(Smi* smi) {
}
void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
DCHECK(count >= 0);
DCHECK_GE(count, 0);
uint64_t size = count * unit_size;
if (size == 0) {
@ -1265,7 +1265,7 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) {
}
if (csp.Is(StackPointer())) {
DCHECK(size % 16 == 0);
DCHECK_EQ(size % 16, 0);
} else {
BumpSystemStackPointer(size);
}
@ -1312,7 +1312,7 @@ void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
}
void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
DCHECK(count >= 0);
DCHECK_GE(count, 0);
uint64_t size = count * unit_size;
if (size == 0) {
@ -1322,7 +1322,7 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) {
Add(StackPointer(), StackPointer(), size);
if (csp.Is(StackPointer())) {
DCHECK(size % 16 == 0);
DCHECK_EQ(size % 16, 0);
} else if (emit_debug_code()) {
// It is safe to leave csp where it is when unwinding the JavaScript stack,
// but if we keep it matching StackPointer, the simulator can detect memory
@ -1353,14 +1353,24 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) {
}
}
void TurboAssembler::DropArguments(const Register& count, uint64_t unit_size) {
Drop(count, unit_size);
void TurboAssembler::DropArguments(const Register& count,
ArgumentsCountMode mode) {
if (mode == kCountExcludesReceiver) {
UseScratchRegisterScope temps(this);
Register tmp = temps.AcquireX();
Add(tmp, count, 1);
Drop(tmp);
} else {
Drop(count);
}
}
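// Design note (sketch): with kCountExcludesReceiver the receiver slot is
// dropped as well, so count + 1 pointer-sized slots leave the stack; with
// kCountIncludesReceiver it is exactly count slots.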
void TurboAssembler::DropSlots(int64_t count, uint64_t unit_size) {
Drop(count, unit_size);
}
void TurboAssembler::PushArgument(const Register& arg) { Push(arg); }
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
@ -1404,7 +1414,7 @@ void TurboAssembler::TestAndBranchIfAnySet(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
int bits = reg.SizeInBits();
DCHECK(CountSetBits(bit_pattern, bits) > 0);
DCHECK_GT(CountSetBits(bit_pattern, bits), 0);
if (CountSetBits(bit_pattern, bits) == 1) {
Tbnz(reg, MaskToBit(bit_pattern), label);
} else {
@ -1417,7 +1427,7 @@ void TurboAssembler::TestAndBranchIfAllClear(const Register& reg,
const uint64_t bit_pattern,
Label* label) {
int bits = reg.SizeInBits();
DCHECK(CountSetBits(bit_pattern, bits) > 0);
DCHECK_GT(CountSetBits(bit_pattern, bits), 0);
if (CountSetBits(bit_pattern, bits) == 1) {
Tbz(reg, MaskToBit(bit_pattern), label);
} else {
@ -1447,7 +1457,7 @@ void MacroAssembler::DisableInstrumentation() {
void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
DCHECK(strlen(marker_name) == 2);
DCHECK_EQ(strlen(marker_name), 2);
// We allow only printable characters in the marker names. Unprintable
// characters are reserved for controlling features of the instrumentation.


@ -9,7 +9,7 @@
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h"
#include "src/callable.h"
#include "src/codegen.h"
#include "src/code-stubs.h"
#include "src/debug/debug.h"
#include "src/external-reference-table.h"
#include "src/frame-constants.h"
@ -53,45 +53,61 @@ TurboAssembler::TurboAssembler(Isolate* isolate, void* buffer, int buffer_size,
}
int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1,
Register exclusion2,
Register exclusion3) const {
Register exclusion) const {
int bytes = 0;
auto list = kCallerSaved;
list.Remove(exclusion1, exclusion2, exclusion3);
DCHECK_EQ(list.Count() % 2, 0);
// We only allow one exclusion register, so if the list is of even length
// before exclusions, it must still be afterwards, to maintain alignment.
// Therefore, we can ignore the exclusion register in the computation.
// However, we leave it in the argument list to mirror the prototype for
// Push/PopCallerSaved().
USE(exclusion);
bytes += list.Count() * kXRegSizeInBits / 8;
if (fp_mode == kSaveFPRegs) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
}
return bytes;
}
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion) {
int bytes = 0;
auto list = kCallerSaved;
list.Remove(exclusion1, exclusion2, exclusion3);
DCHECK_EQ(list.Count() % 2, 0);
if (!exclusion.Is(no_reg)) {
// Replace the excluded register with padding to maintain alignment.
list.Remove(exclusion);
list.Combine(padreg);
}
PushCPURegList(list);
bytes += list.Count() * kXRegSizeInBits / 8;
if (fp_mode == kSaveFPRegs) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PushCPURegList(kCallerSavedV);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
}
return bytes;
}
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
Register exclusion2, Register exclusion3) {
int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
int bytes = 0;
if (fp_mode == kSaveFPRegs) {
DCHECK_EQ(kCallerSavedV.Count() % 2, 0);
PopCPURegList(kCallerSavedV);
bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8;
}
auto list = kCallerSaved;
list.Remove(exclusion1, exclusion2, exclusion3);
DCHECK_EQ(list.Count() % 2, 0);
if (!exclusion.Is(no_reg)) {
// Replace the excluded register with padding to maintain alignment.
list.Remove(exclusion);
list.Combine(padreg);
}
PopCPURegList(list);
bytes += list.Count() * kXRegSizeInBits / 8;
@ -191,7 +207,7 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn,
DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
// Add/sub extended supports shift <= 4. We want to support exactly the
// same modes here.
DCHECK(operand.shift_amount() <= 4);
DCHECK_LE(operand.shift_amount(), 4);
DCHECK(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
Register temp = temps.AcquireSameSizeAs(rn);
@ -255,7 +271,7 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) {
// Iterate through the halfwords. Use movn/movz for the first non-ignored
// halfword, and movk for subsequent halfwords.
DCHECK((reg_size % 16) == 0);
DCHECK_EQ(reg_size % 16, 0);
bool first_mov_done = false;
for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
@ -528,7 +544,7 @@ void TurboAssembler::Mvn(const Register& rd, const Operand& operand) {
}
unsigned TurboAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
DCHECK((reg_size % 8) == 0);
DCHECK_EQ(reg_size % 8, 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
@ -765,7 +781,7 @@ void TurboAssembler::AddSubWithCarryMacro(const Register& rd,
DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
// Add/sub extended supports a shift <= 4. We want to support exactly the
// same modes.
DCHECK(operand.shift_amount() <= 4);
DCHECK_LE(operand.shift_amount(), 4);
DCHECK(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
Register temp = temps.AcquireSameSizeAs(rn);
@ -876,13 +892,13 @@ void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
return;
}
DCHECK(hint == kAdrFar);
DCHECK_EQ(hint, kAdrFar);
if (label->is_bound()) {
int label_offset = label->pos() - pc_offset();
if (Instruction::IsValidPCRelOffset(label_offset)) {
adr(rd, label);
} else {
DCHECK(label_offset <= 0);
DCHECK_LE(label_offset, 0);
int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
adr(rd, min_adr_offset);
Add(rd, rd, label_offset - min_adr_offset);
@ -1015,12 +1031,12 @@ void TurboAssembler::Abs(const Register& rd, const Register& rm,
// If the comparison sets the v flag, the input was the smallest value
// representable by rm, and the mathematical result of abs(rm) is not
// representable using two's complement.
if ((is_not_representable != NULL) && (is_representable != NULL)) {
if ((is_not_representable != nullptr) && (is_representable != nullptr)) {
B(is_not_representable, vs);
B(is_representable);
} else if (is_not_representable != NULL) {
} else if (is_not_representable != nullptr) {
B(is_not_representable, vs);
} else if (is_representable != NULL) {
} else if (is_representable != nullptr) {
B(is_representable, vc);
}
}
@ -1313,7 +1329,7 @@ void TurboAssembler::PushPreamble(Operand total_size) {
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
DCHECK((total_size.ImmediateValue() % 16) == 0);
DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
@ -1334,7 +1350,7 @@ void TurboAssembler::PopPostamble(Operand total_size) {
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
DCHECK((total_size.ImmediateValue() % 16) == 0);
DCHECK_EQ(total_size.ImmediateValue() % 16, 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
@ -1356,7 +1372,7 @@ void TurboAssembler::PopPostamble(int count, int size) {
void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK(offset.ImmediateValue() >= 0);
DCHECK_GE(offset.ImmediateValue(), 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
@ -1368,7 +1384,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) {
void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
if (offset.IsImmediate()) {
DCHECK(offset.ImmediateValue() >= 0);
DCHECK_GE(offset.ImmediateValue(), 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
@ -1482,6 +1498,85 @@ void TurboAssembler::AssertCspAligned() {
}
}
void TurboAssembler::CopySlots(int dst, Register src, Register slot_count) {
DCHECK(!src.IsZero());
UseScratchRegisterScope scope(this);
Register dst_reg = scope.AcquireX();
SlotAddress(dst_reg, dst);
SlotAddress(src, src);
CopyDoubleWords(dst_reg, src, slot_count);
}
void TurboAssembler::CopySlots(Register dst, Register src,
Register slot_count) {
DCHECK(!dst.IsZero() && !src.IsZero());
SlotAddress(dst, dst);
SlotAddress(src, src);
CopyDoubleWords(dst, src, slot_count);
}
void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count,
CopyDoubleWordsMode mode) {
DCHECK(!AreAliased(dst, src, count));
if (emit_debug_code()) {
Register pointer1 = dst;
Register pointer2 = src;
if (mode == kSrcLessThanDst) {
pointer1 = src;
pointer2 = dst;
}
// Copy requires pointer1 < pointer2 || (pointer1 - pointer2) >= count.
Label pointer1_below_pointer2;
Subs(pointer1, pointer1, pointer2);
B(lt, &pointer1_below_pointer2);
Cmp(pointer1, count);
Check(ge, kOffsetOutOfRange);
Bind(&pointer1_below_pointer2);
Add(pointer1, pointer1, pointer2);
}
static_assert(kPointerSize == kDRegSize,
"pointers must be the same size as doubles");
int direction = (mode == kDstLessThanSrc) ? 1 : -1;
UseScratchRegisterScope scope(this);
VRegister temp0 = scope.AcquireD();
VRegister temp1 = scope.AcquireD();
Label pairs, loop, done;
Tbz(count, 0, &pairs);
Ldr(temp0, MemOperand(src, direction * kPointerSize, PostIndex));
Sub(count, count, 1);
Str(temp0, MemOperand(dst, direction * kPointerSize, PostIndex));
Bind(&pairs);
if (mode == kSrcLessThanDst) {
// Adjust pointers for post-index ldp/stp with negative offset:
Sub(dst, dst, kPointerSize);
Sub(src, src, kPointerSize);
}
Bind(&loop);
Cbz(count, &done);
Ldp(temp0, temp1, MemOperand(src, 2 * direction * kPointerSize, PostIndex));
Sub(count, count, 2);
Stp(temp0, temp1, MemOperand(dst, 2 * direction * kPointerSize, PostIndex));
B(&loop);
// TODO(all): large copies may benefit from using temporary Q registers
// to copy four double words per iteration.
Bind(&done);
}
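// C-level sketch (illustrative, not the emitted code) of what the loop above
// does in the default kDstLessThanSrc mode: copy `count` pointer-sized words
// forwards, one word first if the count is odd, then in ldp/stp-style pairs.
#include <cstddef>
#include <cstdint>

void CopyDoubleWordsSketch(uint64_t* dst, const uint64_t* src, size_t count) {
  if (count & 1) {      // odd count: single-word copy (the Tbz path above)
    *dst++ = *src++;
    --count;
  }
  while (count != 0) {  // remaining words are copied two at a time
    dst[0] = src[0];
    dst[1] = src[1];
    dst += 2;
    src += 2;
    count -= 2;
  }
}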
void TurboAssembler::SlotAddress(Register dst, int slot_offset) {
Add(dst, StackPointer(), slot_offset << kPointerSizeLog2);
}
void TurboAssembler::SlotAddress(Register dst, Register slot_offset) {
Add(dst, StackPointer(), Operand(slot_offset, LSL, kPointerSizeLog2));
}
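// Address sketch (illustrative): slot i lives at StackPointer() + i * 8 on
// arm64, with slot zero at the stack pointer itself.
#include <cstdint>

uintptr_t SlotAddressSketch(uintptr_t sp, int slot_offset) {
  return sp + (static_cast<uintptr_t>(slot_offset) << 3);  // 8-byte slots assumed
}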
void TurboAssembler::AssertFPCRState(Register fpcr) {
if (emit_debug_code()) {
Label unexpected_mode, done;
@ -1537,32 +1632,6 @@ void TurboAssembler::Move(Register dst, Register src) { Mov(dst, src); }
void TurboAssembler::Move(Register dst, Handle<HeapObject> x) { Mov(dst, x); }
void TurboAssembler::Move(Register dst, Smi* src) { Mov(dst, src); }
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}
void MacroAssembler::LoadAccessor(Register dst, Register holder,
int accessor_index,
AccessorComponent accessor) {
Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
LoadInstanceDescriptors(dst, dst);
Ldr(dst,
FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
: AccessorPair::kSetterOffset;
Ldr(dst, FieldMemOperand(dst, offset));
}
void MacroAssembler::InNewSpace(Register object,
Condition cond,
Label* branch) {
DCHECK(cond == eq || cond == ne);
UseScratchRegisterScope temps(this);
CheckPageFlag(object, temps.AcquireSameSizeAs(object),
MemoryChunk::kIsInNewSpaceMask, cond, branch);
}
void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -1571,7 +1640,6 @@ void TurboAssembler::AssertSmi(Register object, BailoutReason reason) {
}
}
void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
@ -1626,12 +1694,9 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
Register temp = temps.AcquireX();
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
// Load instance type
Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset));
Label do_check;
// Check if JSGeneratorObject
Cmp(temp, JS_GENERATOR_OBJECT_TYPE);
// Load instance type and check if JSGeneratorObject
CompareInstanceType(temp, temp, JS_GENERATOR_OBJECT_TYPE);
B(eq, &do_check);
// Check if JSAsyncGeneratorObject
@ -1642,9 +1707,10 @@ void MacroAssembler::AssertGeneratorObject(Register object) {
Check(eq, kOperandIsNotAGeneratorObject);
}
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
Register scratch) {
void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Register scratch = temps.AcquireX();
Label done_checking;
AssertNotSmi(object);
JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
@ -1785,14 +1851,14 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
// BUILTIN_FP_CALL: double f(double)
// BUILTIN_FP_INT_CALL: double f(double, int)
if (num_of_double_args > 0) {
DCHECK(num_of_reg_args <= 1);
DCHECK((num_of_double_args + num_of_reg_args) <= 2);
DCHECK_LE(num_of_reg_args, 1);
DCHECK_LE(num_of_double_args + num_of_reg_args, 2);
}
// We rely on the frame alignment being 16 bytes, which means we never need
// to align the CSP by an unknown number of bytes and we always know the delta
// between the stack pointer and the frame pointer.
DCHECK(ActivationFrameAlignment() == 16);
DCHECK_EQ(ActivationFrameAlignment(), 16);
// If the stack pointer is not csp, we need to derive an aligned csp from the
// current stack pointer.
@ -1931,7 +1997,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
DCHECK(((imm >> 48) & 0xffff) == 0);
DCHECK_EQ((imm >> 48) & 0xffff, 0);
movz(temp, (imm >> 0) & 0xffff, 0);
movk(temp, (imm >> 16) & 0xffff, 16);
movk(temp, (imm >> 32) & 0xffff, 32);
@ -1958,6 +2024,38 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
#endif
}
void TurboAssembler::Call(ExternalReference target) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
// Immediate is in charge of setting the relocation mode to
// EXTERNAL_REFERENCE.
Ldr(temp, Immediate(target));
Call(temp);
}
void TurboAssembler::CallForDeoptimization(Address target,
RelocInfo::Mode rmode) {
DCHECK_EQ(rmode, RelocInfo::RUNTIME_ENTRY);
BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
#endif
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
// Deoptimisation table entries require the call address to be in x16, in
// order to compute the entry id.
DCHECK(temp.Is(x16));
Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
Blr(temp);
#ifdef DEBUG
AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
#endif
}
int TurboAssembler::CallSize(Register target) {
USE(target);
return kInstructionSize;
@ -2046,19 +2144,6 @@ void MacroAssembler::TryRepresentDoubleAsInt(Register as_int, VRegister value,
}
}
void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
Label* not_unique_name) {
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
// if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
// continue
// } else {
// goto not_unique_name
// }
Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
B(ne, not_unique_name);
}
void TurboAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
Register caller_args_count_reg,
Register scratch0, Register scratch1) {
@ -2196,38 +2281,31 @@ void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
ExternalReference::debug_hook_on_function_call_address(isolate());
Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
CompareAndBranch(x4, Operand(0), eq, &skip_hook);
Cbz(x4, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
if (expected.is_reg()) {
SmiTag(expected.reg());
Push(expected.reg());
}
if (actual.is_reg()) {
SmiTag(actual.reg());
Push(actual.reg());
}
if (new_target.is_valid()) {
Push(new_target);
}
Push(fun);
Push(fun);
Register expected_reg = padreg;
Register actual_reg = padreg;
if (expected.is_reg()) expected_reg = expected.reg();
if (actual.is_reg()) actual_reg = actual.reg();
if (!new_target.is_valid()) new_target = padreg;
// Save values on stack.
SmiTag(expected_reg);
SmiTag(actual_reg);
Push(expected_reg, actual_reg, new_target, fun);
PushArgument(fun);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
}
if (actual.is_reg()) {
Pop(actual.reg());
SmiUntag(actual.reg());
}
if (expected.is_reg()) {
Pop(expected.reg());
SmiUntag(expected.reg());
}
// Restore values from stack.
Pop(fun, new_target, actual_reg, expected_reg);
SmiUntag(actual_reg);
SmiUntag(expected_reg);
}
bind(&skip_hook);
Bind(&skip_hook);
}
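// Design note (sketch): pushing expected, actual, new_target and fun as one
// group of four X registers keeps the stack pointer 16-byte aligned, with
// padreg standing in for any operand that is not a live register.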
void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
@ -2373,9 +2451,7 @@ void TurboAssembler::TruncateDoubleToIDelayed(Zone* zone, Register result,
// If we fell through then inline version didn't succeed - call stub instead.
Push(lr, double_input);
auto stub = new (zone) DoubleToIStub(nullptr, jssp, result, 0,
true, // is_truncating
true); // skip_fastpath
auto stub = new (zone) DoubleToIStub(nullptr, result);
// DoubleToIStub preserves any registers it needs to clobber.
CallStubDelayed(stub);
@ -2425,13 +2501,21 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
// csp[1] : type
// csp[0] : for alignment
} else {
DCHECK_EQ(type, StackFrame::CONSTRUCT);
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, StackFrame::TypeToMarker(type));
Push(lr, fp, type_reg);
Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
// jssp[2] : lr
// jssp[1] : fp
// jssp[0] : type
// Users of this frame type push a context pointer after the type field,
// so do it here to keep the stack pointer aligned.
Push(lr, fp, type_reg, cp);
// The context pointer isn't part of the fixed frame, so add an extra slot
// to account for it.
Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
// jssp[3] : lr
// jssp[2] : fp
// jssp[1] : type
// jssp[0] : cp
}
}
@ -2462,7 +2546,7 @@ void MacroAssembler::ExitFrameRestoreFPRegs() {
// Read the registers from the stack without popping them. The stack pointer
// will be reset as part of the unwinding process.
CPURegList saved_fp_regs = kCallerSavedV;
DCHECK(saved_fp_regs.Count() % 2 == 0);
DCHECK_EQ(saved_fp_regs.Count() % 2, 0);
int offset = ExitFrameConstants::kLastExitFrameField;
while (!saved_fp_regs.IsEmpty()) {
@ -2554,8 +2638,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
// Leave the current exit frame.
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
bool restore_context) {
const Register& scratch) {
DCHECK(csp.Is(StackPointer()));
if (restore_doubles) {
@ -2563,11 +2646,9 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
}
// Restore the context pointer from the top frame.
if (restore_context) {
Mov(scratch, Operand(ExternalReference(IsolateAddressId::kContextAddress,
isolate())));
Ldr(cp, MemOperand(scratch));
}
Mov(scratch,
Operand(ExternalReference(IsolateAddressId::kContextAddress, isolate())));
Ldr(cp, MemOperand(scratch));
if (emit_debug_code()) {
// Also emit debug code to clear the cp in the top frame.
@ -2593,7 +2674,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles,
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
DCHECK(value != 0);
DCHECK_NE(value, 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Mov(scratch2, ExternalReference(counter));
Ldr(scratch1.W(), MemOperand(scratch2));
@ -2644,23 +2725,11 @@ void MacroAssembler::CompareObjectType(Register object,
void MacroAssembler::CompareInstanceType(Register map,
Register type_reg,
InstanceType type) {
Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
Ldrh(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
Cmp(type_reg, type);
}
void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
Mov(value, Operand(cell));
Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
}
void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
Label* miss) {
GetWeakValue(value, cell);
JumpIfSmi(value, miss);
}
void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
// Load the map's "bit field 2".
Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
@ -2668,19 +2737,6 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
DecodeField<Map::ElementsKindBits>(result);
}
void MacroAssembler::GetMapConstructor(Register result, Register map,
Register temp, Register temp2) {
Label done, loop;
Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
Bind(&loop);
JumpIfSmi(result, &done);
CompareObjectType(result, temp, temp2, MAP_TYPE);
B(ne, &done);
Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
B(&loop);
Bind(&done);
}
void MacroAssembler::CompareRoot(const Register& obj,
Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
@ -2751,44 +2807,6 @@ bool TurboAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame() || !stub->SometimesSetsUpAFrame();
}
void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Register address, Register scratch1,
SaveFPRegsMode fp_mode) {
DCHECK(!AreAliased(object, address, scratch1));
Label done, store_buffer_overflow;
if (emit_debug_code()) {
Label ok;
JumpIfNotInNewSpace(object, &ok);
Abort(kRememberedSetPointerInNewSpace);
bind(&ok);
}
UseScratchRegisterScope temps(this);
Register scratch2 = temps.AcquireX();
// Load store buffer top.
Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
Ldr(scratch1, MemOperand(scratch2));
// Store pointer to buffer and increment buffer top.
Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
// Write back new top of buffer.
Str(scratch1, MemOperand(scratch2));
// Call stub on end of buffer.
// Check for end of buffer.
Tst(scratch1, StoreBuffer::kStoreBufferMask);
B(eq, &store_buffer_overflow);
Ret();
Bind(&store_buffer_overflow);
Push(lr);
StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
CallStub(&store_buffer_overflow_stub);
Pop(lr);
Bind(&done);
Ret();
}
void MacroAssembler::PopSafepointRegisters() {
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
DCHECK_GE(num_unsaved, 0);
@ -2812,7 +2830,7 @@ void MacroAssembler::PushSafepointRegisters() {
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// Make sure the safepoint registers list is what we expect.
DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
DCHECK_EQ(CPURegList::GetSafepointSavedRegisters().list(), 0x6ffcffff);
// Safepoint registers are stored contiguously on the stack, but not all the
// registers are saved. The following registers are excluded:
@ -2909,7 +2927,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
}
void TurboAssembler::SaveRegisters(RegList registers) {
DCHECK(NumRegs(registers) > 0);
DCHECK_GT(NumRegs(registers), 0);
CPURegList regs(lr);
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@ -2921,7 +2939,7 @@ void TurboAssembler::SaveRegisters(RegList registers) {
}
void TurboAssembler::RestoreRegisters(RegList registers) {
DCHECK(NumRegs(registers) > 0);
DCHECK_GT(NumRegs(registers), 0);
CPURegList regs(lr);
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
@ -3014,13 +3032,7 @@ void MacroAssembler::RecordWrite(Register object, Register address,
if (lr_status == kLRHasNotBeenSaved) {
Push(padreg, lr);
}
#ifdef V8_CSA_WRITE_BARRIER
CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
#else
RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
fp_mode);
CallStub(&stub);
#endif
if (lr_status == kLRHasNotBeenSaved) {
Pop(lr, padreg);
}
@ -3040,120 +3052,6 @@ void MacroAssembler::RecordWrite(Register object, Register address,
}
}
void MacroAssembler::AssertHasValidColor(const Register& reg) {
if (emit_debug_code()) {
// The bit sequence is backward. The first character in the string
// represents the least significant bit.
DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label color_is_valid;
Tbnz(reg, 0, &color_is_valid);
Tbz(reg, 1, &color_is_valid);
Abort(kUnexpectedColorFound);
Bind(&color_is_valid);
}
}
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register shift_reg) {
DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
// addr_reg is divided into fields:
// |63 page base 20|19 high 8|7 shift 3|2 0|
// 'high' gives the index of the cell holding color bits for the object.
// 'shift' gives the offset in the cell for this object's color.
const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
// bitmap_reg:
// |63 page base 20|19 zeros 15|14 high 3|2 0|
Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
}
void MacroAssembler::HasColor(Register object,
Register bitmap_scratch,
Register shift_scratch,
Label* has_color,
int first_bit,
int second_bit) {
// See mark-compact.h for color definitions.
DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
GetMarkBits(object, bitmap_scratch, shift_scratch);
Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
// Shift the bitmap down to get the color of the object in bits [1:0].
Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
AssertHasValidColor(bitmap_scratch);
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
// Check for the color.
if (first_bit == 0) {
// Checking for white.
DCHECK(second_bit == 0);
// We only need to test the first bit.
Tbz(bitmap_scratch, 0, has_color);
} else {
Label other_color;
// Checking for grey or black.
Tbz(bitmap_scratch, 0, &other_color);
if (second_bit == 0) {
Tbz(bitmap_scratch, 1, has_color);
} else {
Tbnz(bitmap_scratch, 1, has_color);
}
Bind(&other_color);
}
// Fall through if it does not have the right color.
}
void MacroAssembler::JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black) {
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
}
void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
Register shift_scratch, Register load_scratch,
Register length_scratch,
Label* value_is_white) {
DCHECK(!AreAliased(
value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
GetMarkBits(value, bitmap_scratch, shift_scratch);
Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
Lsr(load_scratch, load_scratch, shift_scratch);
AssertHasValidColor(load_scratch);
// If the value is black or grey we don't need to do anything.
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
Tbz(load_scratch, 0, value_is_white);
}
void TurboAssembler::Assert(Condition cond, BailoutReason reason) {
if (emit_debug_code()) {
Check(cond, reason);
@ -3399,7 +3297,7 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) {
dc32(arg_pattern_list); // kPrintfArgPatternListOffset
}
#else
Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
Call(ExternalReference::printf_function(isolate()));
#endif
}
@ -3541,7 +3439,7 @@ void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
}
InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
: reg_(NoReg), smi_check_delta_(0), smi_check_(NULL) {
: reg_(NoReg), smi_check_delta_(0), smi_check_(nullptr) {
InstructionSequence* inline_data = InstructionSequence::At(info);
DCHECK(inline_data->IsInlineData());
if (inline_data->IsInlineData()) {


@ -659,6 +659,37 @@ class TurboAssembler : public Assembler {
// Emits a runtime assert that the CSP is aligned.
void AssertCspAligned();
// Copy slot_count stack slots from the stack offset specified by src to
// the stack offset specified by dst. The offsets and count are expressed in
// slot-sized units. Offset dst must be less than src, or the gap between
// them must be greater than or equal to slot_count, otherwise the result is
// unpredictable. The function may corrupt its register arguments. The
// registers must not alias each other.
void CopySlots(int dst, Register src, Register slot_count);
void CopySlots(Register dst, Register src, Register slot_count);
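// Hypothetical usage sketch (register choices and constants are illustrative):
//   __ Mov(x2, kSourceSlotIndex);  // source slot offset
//   __ Mov(x3, 3);                 // number of slots to move
//   __ CopySlots(2, x2, x3);       // copy them so they start at slot 2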
// Copy count double words from the address in register src to the address
// in register dst. There are two modes for this function:
// 1) Address dst must be less than src, or the gap between them must be
// greater than or equal to count double words, otherwise the result is
// unpredictable. This is the default mode.
// 2) Address src must be less than dst, or the gap between them must be
// greater than or equal to count double words, otherwise the result is
// unpredictable. In this mode, src and dst specify the last (highest)
// address of the regions to copy from and to.
// The case where src == dst is not supported.
// The function may corrupt its register arguments. The registers must not
// alias each other.
enum CopyDoubleWordsMode { kDstLessThanSrc, kSrcLessThanDst };
void CopyDoubleWords(Register dst, Register src, Register count,
CopyDoubleWordsMode mode = kDstLessThanSrc);
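// Usage note (sketch): non-overlapping or downward-overlapping copies can use
// the default kDstLessThanSrc; upward-overlapping copies pass kSrcLessThanDst
// and point dst and src at the highest address of each region, mirroring how
// memmove chooses a copy direction for overlap.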
// Calculate the address of a double word-sized slot at slot_offset from the
// stack pointer, and write it to dst. Positive slot_offsets are at addresses
// greater than sp, with slot zero at sp.
void SlotAddress(Register dst, int slot_offset);
void SlotAddress(Register dst, Register slot_offset);
// Load a literal from the inline constant pool.
inline void Ldr(const CPURegister& rt, const Operand& imm);
// Helper function for double immediate.
@ -681,12 +712,15 @@ class TurboAssembler : public Assembler {
inline void Drop(const Register& count, uint64_t unit_size = kXRegSize);
// Drop arguments from stack without actually accessing memory.
// This will currently drop 'count' arguments of the given size from the
// stack.
// This will currently drop 'count' arguments from the stack.
// We assume the size of the arguments is the pointer size.
// An optional mode argument is passed, which can indicate we need to
// explicitly add the receiver to the count.
// TODO(arm64): Update this to round up the number of bytes dropped to
// a multiple of 16, so that we can remove jssp.
enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
inline void DropArguments(const Register& count,
uint64_t unit_size = kXRegSize);
ArgumentsCountMode mode = kCountIncludesReceiver);
// Drop slots from stack without actually accessing memory.
// This will currently drop 'count' slots of the given size from the stack.
@ -694,6 +728,10 @@ class TurboAssembler : public Assembler {
// a multiple of 16, so that we can remove jssp.
inline void DropSlots(int64_t count, uint64_t unit_size = kXRegSize);
// Push a single argument to the stack.
// TODO(arm64): Update this to push a padding slot above the argument.
inline void PushArgument(const Register& arg);
// Re-synchronizes the system stack pointer (csp) with the current stack
// pointer (according to StackPointer()).
//
@ -802,20 +840,15 @@ class TurboAssembler : public Assembler {
// Calculate how much stack space (in bytes) are required to store caller
// registers excluding those specified in the arguments.
int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg) const;
Register exclusion) const;
// Push caller saved registers on the stack, and return the number of bytes
// stack pointer is adjusted.
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);
// Restore caller saved registers from the stack, and return the number of
// bytes stack pointer is adjusted.
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
Register exclusion2 = no_reg,
Register exclusion3 = no_reg);
int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion = no_reg);
// Move an immediate into register dst, and return an Operand object for use
// with a subsequent instruction that accepts a shift. The value moved into
@ -853,7 +886,7 @@ class TurboAssembler : public Assembler {
inline void Brk(int code);
inline void JumpIfSmi(Register value, Label* smi_label,
Label* not_smi_label = NULL);
Label* not_smi_label = nullptr);
inline void Fmov(VRegister fd, VRegister fn);
inline void Fmov(VRegister fd, Register rn);
@ -884,10 +917,9 @@ class TurboAssembler : public Assembler {
void Call(Label* target);
void Call(Address target, RelocInfo::Mode rmode);
void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
void Call(ExternalReference target);
void CallForDeoptimization(Address target, RelocInfo::Mode rmode) {
Call(target, rmode);
}
void CallForDeoptimization(Address target, RelocInfo::Mode rmode);
// For every Call variant, there is a matching CallSize function that returns
// the size (in bytes) of the call sequence.
@ -1197,7 +1229,8 @@ class TurboAssembler : public Assembler {
// If rm is the minimum representable value, the result is not representable.
// Handlers for each case can be specified using the relevant labels.
void Abs(const Register& rd, const Register& rm,
Label* is_not_representable = NULL, Label* is_representable = NULL);
Label* is_not_representable = nullptr,
Label* is_representable = nullptr);
inline void Cls(const Register& rd, const Register& rn);
inline void Cneg(const Register& rd, const Register& rn, Condition cond);
@ -1240,7 +1273,7 @@ class TurboAssembler : public Assembler {
// The 'args' argument should point to an array of variable arguments in their
// proper PCS registers (and in calling order). The argument registers can
// have mixed types. The format string (x0) should not be included.
void CallPrintf(int arg_count = 0, const CPURegister* args = NULL);
void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
private:
bool has_frame_ = false;
@ -1634,18 +1667,6 @@ class MacroAssembler : public TurboAssembler {
// csp must be aligned to 16 bytes.
void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
// Emit code that loads |parameter_index|'th parameter from the stack to
// the register according to the CallInterfaceDescriptor definition.
// |sp_to_caller_sp_offset_in_words| specifies the number of words pushed
// below the caller's sp.
template <class Descriptor>
void LoadParameterFromStack(
Register reg, typename Descriptor::ParameterIndices parameter_index,
int sp_to_ra_offset_in_words = 0) {
DCHECK(Descriptor::kPassLastArgsOnStack);
UNIMPLEMENTED();
}
// Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
// register.
inline void ClaimBySMI(const Register& count_smi,
@ -1712,11 +1733,6 @@ class MacroAssembler : public TurboAssembler {
static int SafepointRegisterStackIndex(int reg_code);
void LoadInstanceDescriptors(Register map,
Register descriptors);
void LoadAccessor(Register dst, Register holder, int accessor_index,
AccessorComponent accessor);
template<typename Field>
void DecodeField(Register dst, Register src) {
static const int shift = Field::kShift;
@ -1741,14 +1757,12 @@ class MacroAssembler : public TurboAssembler {
inline void SmiTagAndPush(Register src1, Register src2);
inline void JumpIfNotSmi(Register value, Label* not_smi_label);
inline void JumpIfBothSmi(Register value1,
Register value2,
inline void JumpIfBothSmi(Register value1, Register value2,
Label* both_smi_label,
Label* not_smi_label = NULL);
inline void JumpIfEitherSmi(Register value1,
Register value2,
Label* not_smi_label = nullptr);
inline void JumpIfEitherSmi(Register value1, Register value2,
Label* either_smi_label,
Label* not_smi_label = NULL);
Label* not_smi_label = nullptr);
inline void JumpIfEitherNotSmi(Register value1,
Register value2,
Label* not_smi_label);
@ -1778,7 +1792,7 @@ class MacroAssembler : public TurboAssembler {
// Abort execution if argument is not undefined or an AllocationSite, enabled
// via --debug-code.
void AssertUndefinedOrAllocationSite(Register object, Register scratch);
void AssertUndefinedOrAllocationSite(Register object);
void JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
@ -1792,17 +1806,13 @@ class MacroAssembler : public TurboAssembler {
// On output the Z flag is set if the operation was successful.
void TryRepresentDoubleAsInt64(Register as_int, VRegister value,
VRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL) {
Label* on_successful_conversion = nullptr,
Label* on_failed_conversion = nullptr) {
DCHECK(as_int.Is64Bits());
TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
on_failed_conversion);
}
// ---- String Utilities ----
void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
// ---- Calling / Jumping helpers ----
void CallStub(CodeStub* stub);
@ -1867,11 +1877,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Support functions.
// Machine code version of Map::GetConstructor().
// |temp| holds |result|'s map when done, and |temp2| its instance type.
void GetMapConstructor(Register result, Register map, Register temp,
Register temp2);
// Compare object type for heap object. heap_object contains a non-Smi
// whose object type should be compared with the given type. This both
// sets the flags and leaves the object type in the type_reg register.
@ -1906,12 +1911,6 @@ class MacroAssembler : public TurboAssembler {
Register type_reg,
InstanceType type);
void GetWeakValue(Register value, Handle<WeakCell> cell);
// Load the value of the weak cell in the value register. Branch to the given
// miss label if the weak cell was cleared.
void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
// Load the elements kind field from a map, and return it in the result
// register.
void LoadElementsKindFromMap(Register result, Register map);
@ -1993,9 +1992,7 @@ class MacroAssembler : public TurboAssembler {
// * The stack pointer is reset to jssp.
//
// The stack pointer must be csp on entry.
void LeaveExitFrame(bool save_doubles,
const Register& scratch,
bool restore_context);
void LeaveExitFrame(bool save_doubles, const Register& scratch);
// Load the global proxy from the current context.
void LoadGlobalProxy(Register dst) {
@ -2013,13 +2010,6 @@ class MacroAssembler : public TurboAssembler {
// ---------------------------------------------------------------------------
// Garbage collector support (GC).
// Record in the remembered set the fact that we have a pointer to new space
// at the address pointed to by the addr register. Only works if addr is not
// in new space.
void RememberedSetHelper(Register object, // Used for debug code.
Register addr, Register scratch1,
SaveFPRegsMode save_fp);
// Push and pop the registers that can hold pointers, as defined by the
// RegList constant kSafepointSavedRegisters.
void PushSafepointRegisters();
@ -2028,18 +2018,6 @@ class MacroAssembler : public TurboAssembler {
void CheckPageFlag(const Register& object, const Register& scratch, int mask,
Condition cc, Label* condition_met);
// Check if object is in new space and jump accordingly.
// Register 'object' is preserved.
void JumpIfNotInNewSpace(Register object,
Label* branch) {
InNewSpace(object, ne, branch);
}
void JumpIfInNewSpace(Register object,
Label* branch) {
InNewSpace(object, eq, branch);
}
// Notify the garbage collector that we wrote a pointer into an object.
// |object| is the object being stored into, |value| is the object being
// stored. value and scratch registers are clobbered by the operation.
@ -2060,36 +2038,6 @@ class MacroAssembler : public TurboAssembler {
RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
SmiCheck smi_check = INLINE_SMI_CHECK);
// Checks the color of an object. If the object is white we jump to the
// incremental marker.
void JumpIfWhite(Register value, Register scratch1, Register scratch2,
Register scratch3, Register scratch4, Label* value_is_white);
// Helper for finding the mark bits for an address.
// Note that the behaviour slightly differs from other architectures.
// On exit:
// - addr_reg is unchanged.
// - The bitmap register points at the word with the mark bits.
// - The shift register contains the index of the first color bit for this
// object in the bitmap.
inline void GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register shift_reg);
// Check if an object has a given incremental marking color.
void HasColor(Register object,
Register scratch0,
Register scratch1,
Label* has_color,
int first_bit,
int second_bit);
void JumpIfBlack(Register object,
Register scratch0,
Register scratch1,
Label* on_black);
// ---------------------------------------------------------------------------
// Debugging.
@ -2158,8 +2106,8 @@ class MacroAssembler : public TurboAssembler {
// On output the Z flag is set if the operation was successful.
void TryRepresentDoubleAsInt(Register as_int, VRegister value,
VRegister scratch_d,
Label* on_successful_conversion = NULL,
Label* on_failed_conversion = NULL);
Label* on_successful_conversion = nullptr,
Label* on_failed_conversion = nullptr);
public:
// Far branches resolving.
@ -2278,9 +2226,7 @@ class InlineSmiCheckInfo {
public:
explicit InlineSmiCheckInfo(Address info);
bool HasSmiCheck() const {
return smi_check_ != NULL;
}
bool HasSmiCheck() const { return smi_check_ != nullptr; }
const Register& SmiRegister() const {
return reg_;

View File

@ -72,9 +72,7 @@ void Simulator::TraceSim(const char* format, ...) {
}
}
const Instruction* Simulator::kEndOfSimAddress = NULL;
const Instruction* Simulator::kEndOfSimAddress = nullptr;
void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
int width = msb - lsb + 1;
@ -82,7 +80,7 @@ void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
bits <<= lsb;
uint32_t mask = ((1 << width) - 1) << lsb;
DCHECK((mask & write_ignore_mask_) == 0);
DCHECK_EQ(mask & write_ignore_mask_, 0);
value_ = (value_ & ~mask) | (bits & mask);
}
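Many hunks in this update mechanically rewrite boolean DCHECKs into their comparison forms (DCHECK_EQ, DCHECK_NE, DCHECK_LT, ...). A minimal, self-contained sketch of why the comparison form is preferable, using hypothetical stand-in helpers rather than V8's actual logging.h macros:

#include <cstdio>
#include <cstdlib>

// Hypothetical helpers for illustration only; V8's real DCHECK/DCHECK_EQ
// macros live in src/base/logging.h and also capture the expression text.
void CheckTrue(bool condition, const char* expr) {
  if (!condition) {
    std::fprintf(stderr, "Check failed: %s\n", expr);  // no operand values
    std::abort();
  }
}

template <typename T, typename U>
void CheckEq(const T& lhs, const U& rhs, const char* expr) {
  if (!(lhs == rhs)) {
    // The comparison form can report both operand values on failure.
    std::fprintf(stderr, "Check failed: %s (%lld vs %lld)\n", expr,
                 static_cast<long long>(lhs), static_cast<long long>(rhs));
    std::abort();
  }
}

int main() {
  int mask = 0xF0, write_ignore_mask = 0x0F;
  CheckTrue((mask & write_ignore_mask) == 0, "(mask & write_ignore_mask) == 0");
  CheckEq(mask & write_ignore_mask, 0, "mask & write_ignore_mask == 0");
  return 0;
}

The design point is simply that a failing DCHECK_EQ can print what the two operands actually were, which a plain DCHECK on a boolean expression cannot.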
@ -111,10 +109,10 @@ void Simulator::Initialize(Isolate* isolate) {
Simulator* Simulator::current(Isolate* isolate) {
Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
DCHECK(isolate_data != NULL);
DCHECK_NOT_NULL(isolate_data);
Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
if (sim == nullptr) {
if (FLAG_trace_sim || FLAG_log_instruction_stats || FLAG_debug_sim) {
sim = new Simulator(new Decoder<DispatchingDecoderVisitor>(), isolate);
} else {
@ -333,7 +331,7 @@ uintptr_t Simulator::PopAddress() {
intptr_t current_sp = sp();
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
uintptr_t address = *stack_slot;
DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
DCHECK_LT(sizeof(uintptr_t), 2 * kXRegSize);
set_sp(current_sp + 2 * kXRegSize);
return address;
}
@ -352,11 +350,10 @@ uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
return stack_limit_ + 1024;
}
Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
Isolate* isolate, FILE* stream)
: decoder_(decoder),
last_debugger_input_(NULL),
last_debugger_input_(nullptr),
log_parameters_(NO_PARAM),
isolate_(isolate) {
// Setup the decoder.
@ -376,12 +373,11 @@ Simulator::Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
}
}
Simulator::Simulator()
: decoder_(NULL),
last_debugger_input_(NULL),
: decoder_(nullptr),
last_debugger_input_(nullptr),
log_parameters_(NO_PARAM),
isolate_(NULL) {
isolate_(nullptr) {
Init(stdout);
CHECK(!FLAG_trace_sim && !FLAG_log_instruction_stats);
}
@ -414,7 +410,7 @@ void Simulator::ResetState() {
fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
// Reset registers to 0.
pc_ = NULL;
pc_ = nullptr;
for (unsigned i = 0; i < kNumberOfRegisters; i++) {
set_xreg(i, 0xbadbeef);
}
@ -473,7 +469,7 @@ class Redirection {
public:
Redirection(Isolate* isolate, void* external_function,
ExternalReference::Type type)
: external_function_(external_function), type_(type), next_(NULL) {
: external_function_(external_function), type_(type), next_(nullptr) {
redirect_call_.SetInstructionBits(
HLT | Assembler::ImmException(kImmExceptionIsRedirectedCall));
next_ = isolate->simulator_redirection();
@ -493,9 +489,9 @@ class Redirection {
static Redirection* Get(Isolate* isolate, void* external_function,
ExternalReference::Type type) {
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) {
DCHECK_EQ(current->type(), type);
for (; current != nullptr; current = current->next_) {
if (current->external_function_ == external_function &&
current->type_ == type) {
return current;
}
}
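The Get() change above makes the lookup key effectively (external_function, type) rather than just the address, so one C function registered under two call types now gets two distinct redirections. A self-contained toy analog of that cache (all names here are illustrative, not the simulator's):

#include <cstdio>

enum class CallType { kBuiltinCall, kBuiltinFPCall };

struct ToyRedirection {
  void* external_function;
  CallType type;
  ToyRedirection* next;
};

ToyRedirection* head = nullptr;

// Reuse an entry only if both the address and the call type match,
// mirroring the updated lookup loop above. Entries are intentionally
// leaked in this sketch, as they live for the process lifetime.
ToyRedirection* Get(void* fn, CallType type) {
  for (ToyRedirection* cur = head; cur != nullptr; cur = cur->next) {
    if (cur->external_function == fn && cur->type == type) return cur;
  }
  ToyRedirection* fresh = new ToyRedirection{fn, type, head};
  head = fresh;
  return fresh;
}

int main() {
  int dummy = 0;
  void* fn = &dummy;
  ToyRedirection* a = Get(fn, CallType::kBuiltinCall);
  ToyRedirection* b = Get(fn, CallType::kBuiltinFPCall);  // distinct entry
  ToyRedirection* c = Get(fn, CallType::kBuiltinCall);    // reuses `a`
  std::printf("%d %d\n", a == b, a == c);                 // prints "0 1"
  return 0;
}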
@ -2219,7 +2215,7 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
int64_t offset,
AddrMode addrmode) {
if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
DCHECK(offset != 0);
DCHECK_NE(offset, 0);
uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
set_reg(addr_reg, address + offset, Reg31IsStackPointer);
}
@ -2286,6 +2282,8 @@ void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
} else {
if (is_exclusive) {
unsigned rs = instr->Rs();
DCHECK_NE(rs, rt);
DCHECK_NE(rs, rn);
if (local_monitor_.NotifyStoreExcl(address,
get_transaction_size(access_size)) &&
global_monitor_.Pointer()->NotifyStoreExcl_Locked(
@ -2570,7 +2568,7 @@ void Simulator::VisitDataProcessing3Source(Instruction* instr) {
case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
case SMULH_x:
DCHECK(instr->Ra() == kZeroRegCode);
DCHECK_EQ(instr->Ra(), kZeroRegCode);
result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
break;
default: UNIMPLEMENTED();
@ -3216,12 +3214,12 @@ void Simulator::Debug() {
PrintInstructionsAt(pc_, 1);
// Read the command line.
char* line = ReadLine("sim> ");
if (line == NULL) {
if (line == nullptr) {
break;
} else {
// Repeat last command by default.
char* last_input = last_debugger_input();
if (strcmp(line, "\n") == 0 && (last_input != NULL)) {
if (strcmp(line, "\n") == 0 && (last_input != nullptr)) {
DeleteArray(line);
line = last_input;
} else {
@ -3341,8 +3339,8 @@ void Simulator::Debug() {
// stack / mem ----------------------------------------------------------
} else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
int64_t* cur = NULL;
int64_t* end = NULL;
int64_t* cur = nullptr;
int64_t* end = nullptr;
int next_arg = 1;
if (strcmp(cmd, "stack") == 0) {
@ -3504,7 +3502,7 @@ void Simulator::VisitException(Instruction* instr) {
// We are going to break, so printing something is not an issue in
// terms of speed.
if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
if (message != NULL) {
if (message != nullptr) {
PrintF(stream_,
"# %sDebugger hit %d: %s%s%s\n",
clr_debug_number,
@ -3539,7 +3537,7 @@ void Simulator::VisitException(Instruction* instr) {
break;
default:
// We don't support a one-shot LOG_DISASM.
DCHECK((parameters & LOG_DISASM) == 0);
DCHECK_EQ(parameters & LOG_DISASM, 0);
// Don't print information that is already being traced.
parameters &= ~log_parameters();
// Print the requested information.
@ -3554,7 +3552,7 @@ void Simulator::VisitException(Instruction* instr) {
pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
// - Verify that the unreachable marker is present.
DCHECK(pc_->Mask(ExceptionMask) == HLT);
DCHECK(pc_->ImmException() == kImmExceptionIsUnreachable);
DCHECK_EQ(pc_->ImmException(), kImmExceptionIsUnreachable);
// - Skip past the unreachable marker.
set_pc(pc_->following());
@ -4341,7 +4339,7 @@ void Simulator::VisitNEONByIndexedElement(Instruction* instr) {
SimVRegister& rd = vreg(instr->Rd());
SimVRegister& rn = vreg(instr->Rn());
ByElementOp Op = NULL;
ByElementOp Op = nullptr;
int rm_reg = instr->Rm();
int index = (instr->NEONH() << 1) | instr->NEONL();
@ -5275,7 +5273,7 @@ void Simulator::VisitNEONScalarByIndexedElement(Instruction* instr) {
SimVRegister& rd = vreg(instr->Rd());
SimVRegister& rn = vreg(instr->Rn());
ByElementOp Op = NULL;
ByElementOp Op = nullptr;
int rm_reg = instr->Rm();
int index = (instr->NEONH() << 1) | instr->NEONL();
@ -5717,8 +5715,8 @@ void Simulator::DoPrintf(Instruction* instr) {
instr + kPrintfArgPatternListOffset,
sizeof(arg_pattern_list));
DCHECK(arg_count <= kPrintfMaxArgCount);
DCHECK((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
DCHECK_LE(arg_count, kPrintfMaxArgCount);
DCHECK_EQ(arg_pattern_list >> (kPrintfArgPatternBits * arg_count), 0);
// We need to call the host printf function with a set of arguments defined by
// arg_pattern_list. Because we don't know the types and sizes of the
@ -5730,7 +5728,7 @@ void Simulator::DoPrintf(Instruction* instr) {
// Leave enough space for one extra character per expected argument (plus the
// '\0' termination).
const char * format_base = reg<const char *>(0);
DCHECK(format_base != NULL);
DCHECK_NOT_NULL(format_base);
size_t length = strlen(format_base) + 1;
char * const format = new char[length + arg_count];
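For context on the arg_pattern_list handling above: the simulator packs one small type code per printf argument into a single integer and unpacks it on the host side. A rough, self-contained sketch of such a packing scheme; the field width and type codes here are illustrative assumptions, not the simulator's exact constants:

#include <cstdint>
#include <cstdio>

// Illustrative constants; the real kPrintfArgPatternBits etc. may differ.
constexpr int kBitsPerArg = 2;
enum ArgPattern : uint32_t { kArgW = 0, kArgX = 1, kArgD = 2 };

// Pack one two-bit argument type code into the list at the given index.
uint32_t Pack(uint32_t list, int index, ArgPattern p) {
  return list | (static_cast<uint32_t>(p) << (index * kBitsPerArg));
}

int main() {
  // Describe three arguments: a 32-bit int, a 64-bit int, a double.
  uint32_t pattern_list = 0;
  pattern_list = Pack(pattern_list, 0, kArgW);
  pattern_list = Pack(pattern_list, 1, kArgX);
  pattern_list = Pack(pattern_list, 2, kArgD);

  const int arg_count = 3;
  // Everything above the described arguments must be zero, mirroring
  // DCHECK_EQ(arg_pattern_list >> (kPrintfArgPatternBits * arg_count), 0).
  if ((pattern_list >> (kBitsPerArg * arg_count)) != 0) return 1;

  for (int i = 0; i < arg_count; ++i) {
    uint32_t code =
        (pattern_list >> (i * kBitsPerArg)) & ((1u << kBitsPerArg) - 1);
    std::printf("arg %d has type code %u\n", i, code);
  }
  return 0;
}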

View File

@ -690,8 +690,7 @@ class Simulator : public DecoderVisitor {
}
explicit Simulator(Decoder<DispatchingDecoderVisitor>* decoder,
Isolate* isolate = NULL,
FILE* stream = stderr);
Isolate* isolate = nullptr, FILE* stream = stderr);
Simulator();
~Simulator();
@ -1700,9 +1699,9 @@ class Simulator : public DecoderVisitor {
LogicVRegister Table(VectorFormat vform, LogicVRegister dst,
const LogicVRegister& ind, bool zero_out_of_bounds,
const LogicVRegister* tab1,
const LogicVRegister* tab2 = NULL,
const LogicVRegister* tab3 = NULL,
const LogicVRegister* tab4 = NULL);
const LogicVRegister* tab2 = nullptr,
const LogicVRegister* tab3 = nullptr,
const LogicVRegister* tab4 = nullptr);
LogicVRegister tbl(VectorFormat vform, LogicVRegister dst,
const LogicVRegister& tab, const LogicVRegister& ind);
LogicVRegister tbl(VectorFormat vform, LogicVRegister dst,
@ -2206,7 +2205,7 @@ class Simulator : public DecoderVisitor {
// functions, or to save and restore it when entering and leaving generated
// code.
void AssertSupportedFPCR() {
DCHECK(fpcr().FZ() == 0); // No flush-to-zero support.
DCHECK_EQ(fpcr().FZ(), 0); // No flush-to-zero support.
DCHECK(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
// The simulator does not support half-precision operations so fpcr().AHP()

View File

@ -2159,7 +2159,7 @@ LogicVRegister Simulator::Table(VectorFormat vform, LogicVRegister dst,
uint64_t j = ind.Uint(vform, i);
int tab_idx = static_cast<int>(j >> 4);
int j_idx = static_cast<int>(j & 15);
if ((tab_idx < 4) && (tab[tab_idx] != NULL)) {
if ((tab_idx < 4) && (tab[tab_idx] != nullptr)) {
result[i] = tab[tab_idx]->Uint(kFormat16B, j_idx);
}
}

View File

@ -105,10 +105,10 @@ int CountTrailingZeros(uint64_t value, int width) {
int CountSetBits(uint64_t value, int width) {
DCHECK((width == 32) || (width == 64));
if (width == 64) {
return static_cast<int>(base::bits::CountPopulation64(value));
return static_cast<int>(base::bits::CountPopulation(value));
}
return static_cast<int>(base::bits::CountPopulation32(
static_cast<uint32_t>(value & 0xfffffffff)));
return static_cast<int>(
base::bits::CountPopulation(static_cast<uint32_t>(value & 0xfffffffff)));
}
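The hunk above reflects base::bits::CountPopulation64/CountPopulation32 being folded into a single CountPopulation entry point. A portable sketch of what such a unification can look like; this is an assumption about the shape of the helper, not V8's actual implementation:

#include <cstdint>
#include <cstdio>
#include <type_traits>

// Generic population count for any unsigned integer type, replacing
// separate 32- and 64-bit entry points.
template <typename T>
int CountPopulationSketch(T value) {
  static_assert(std::is_unsigned<T>::value, "requires an unsigned type");
  int count = 0;
  while (value != 0) {
    value &= value - 1;  // clear the lowest set bit
    ++count;
  }
  return count;
}

int main() {
  std::printf("%d\n", CountPopulationSketch<uint32_t>(0xF0F0F0F0u));         // 16
  std::printf("%d\n", CountPopulationSketch<uint64_t>(0xFFFFFFFFFFFFFFFFull));  // 64
  return 0;
}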
int LowestSetBitPosition(uint64_t value) {

View File

@ -145,12 +145,11 @@ void ReportCompilationSuccess(Handle<Script> script, int position,
}
// Hook to report failed execution of {AsmJs::CompileAsmViaWasm} phase.
void ReportCompilationFailure(Handle<Script> script, int position,
void ReportCompilationFailure(ParseInfo* parse_info, int position,
const char* reason) {
if (FLAG_suppress_asm_messages) return;
Vector<const char> text = CStrVector(reason);
Report(script, position, text, MessageTemplate::kAsmJsInvalid,
v8::Isolate::kMessageWarning);
parse_info->pending_error_handler()->ReportWarningAt(
position, position, MessageTemplate::kAsmJsInvalid, reason);
}
// Hook to report successful execution of {AsmJs::InstantiateAsmWasm} phase.
@ -187,69 +186,70 @@ void ReportInstantiationFailure(Handle<Script> script, int position,
class AsmJsCompilationJob final : public CompilationJob {
public:
explicit AsmJsCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal,
Isolate* isolate)
: CompilationJob(isolate, parse_info, &compilation_info_, "AsmJs"),
zone_(isolate->allocator(), ZONE_NAME),
compilation_info_(&zone_, isolate, parse_info, literal),
AccountingAllocator* allocator)
: CompilationJob(parse_info->stack_limit(), parse_info,
&compilation_info_, "AsmJs", State::kReadyToExecute),
allocator_(allocator),
zone_(allocator, ZONE_NAME),
compilation_info_(&zone_, parse_info, literal),
module_(nullptr),
asm_offsets_(nullptr),
translate_time_(0),
compile_time_(0) {}
compile_time_(0),
module_source_size_(0),
translate_time_micro_(0),
translate_zone_size_(0) {}
protected:
Status PrepareJobImpl() final;
Status PrepareJobImpl(Isolate* isolate) final;
Status ExecuteJobImpl() final;
Status FinalizeJobImpl() final;
Status FinalizeJobImpl(Isolate* isolate) final;
private:
void RecordHistograms(Isolate* isolate);
AccountingAllocator* allocator_;
Zone zone_;
CompilationInfo compilation_info_;
wasm::ZoneBuffer* module_;
wasm::ZoneBuffer* asm_offsets_;
wasm::AsmJsParser::StdlibSet stdlib_uses_;
double translate_time_; // Time (milliseconds) taken to execute step [1].
double compile_time_; // Time (milliseconds) taken to execute step [2].
double translate_time_; // Time (milliseconds) taken to execute step [1].
double compile_time_; // Time (milliseconds) taken to execute step [2].
int module_source_size_; // Module source size in bytes.
int64_t translate_time_micro_; // Time (microseconds) taken to translate.
size_t translate_zone_size_;
DISALLOW_COPY_AND_ASSIGN(AsmJsCompilationJob);
};
CompilationJob::Status AsmJsCompilationJob::PrepareJobImpl() {
CompilationJob::Status AsmJsCompilationJob::PrepareJobImpl(Isolate* isolate) {
UNREACHABLE(); // Prepare should always be skipped.
return SUCCEEDED;
}
CompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
// Step 1: Translate asm.js module to WebAssembly module.
HistogramTimerScope translate_time_scope(
compilation_info()->isolate()->counters()->asm_wasm_translation_time());
size_t compile_zone_start = compilation_info()->zone()->allocation_size();
base::ElapsedTimer translate_timer;
translate_timer.Start();
Zone* compile_zone = compilation_info()->zone();
Zone translate_zone(compilation_info()->isolate()->allocator(), ZONE_NAME);
Zone translate_zone(allocator_, ZONE_NAME);
Utf16CharacterStream* stream = parse_info()->character_stream();
base::Optional<AllowHandleDereference> allow_deref;
if (stream->can_access_heap()) {
DCHECK(
ThreadId::Current().Equals(compilation_info()->isolate()->thread_id()));
allow_deref.emplace();
}
stream->Seek(compilation_info()->literal()->start_position());
wasm::AsmJsParser parser(&translate_zone, stack_limit(), stream);
if (!parser.Run()) {
// TODO(rmcilroy): Temporarily allow heap access here until we have a
// mechanism for delaying pending messages.
DCHECK(
ThreadId::Current().Equals(compilation_info()->isolate()->thread_id()));
AllowHeapAllocation allow_allocation;
AllowHandleAllocation allow_handles;
allow_deref.emplace();
DCHECK(!compilation_info()->isolate()->has_pending_exception());
ReportCompilationFailure(parse_info()->script(), parser.failure_location(),
parser.failure_message());
if (!FLAG_suppress_asm_messages) {
ReportCompilationFailure(parse_info(), parser.failure_location(),
parser.failure_message());
}
return FAILED;
}
module_ = new (compile_zone) wasm::ZoneBuffer(compile_zone);
@ -260,50 +260,32 @@ CompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() {
size_t compile_zone_size =
compilation_info()->zone()->allocation_size() - compile_zone_start;
size_t translate_zone_size = translate_zone.allocation_size();
compilation_info()
->isolate()
->counters()
->asm_wasm_translation_peak_memory_bytes()
->AddSample(static_cast<int>(translate_zone_size));
translate_zone_size_ = translate_zone.allocation_size();
translate_time_ = translate_timer.Elapsed().InMillisecondsF();
int module_size = compilation_info()->literal()->end_position() -
compilation_info()->literal()->start_position();
compilation_info()->isolate()->counters()->asm_module_size_bytes()->AddSample(
module_size);
int64_t translate_time_micro = translate_timer.Elapsed().InMicroseconds();
int translation_throughput =
translate_time_micro != 0
? static_cast<int>(static_cast<int64_t>(module_size) /
translate_time_micro)
: 0;
compilation_info()
->isolate()
->counters()
->asm_wasm_translation_throughput()
->AddSample(translation_throughput);
translate_time_micro_ = translate_timer.Elapsed().InMicroseconds();
module_source_size_ = compilation_info()->literal()->end_position() -
compilation_info()->literal()->start_position();
if (FLAG_trace_asm_parser) {
PrintF(
"[asm.js translation successful: time=%0.3fms, "
"translate_zone=%" PRIuS "KB, compile_zone+=%" PRIuS "KB]\n",
translate_time_, translate_zone_size / KB, compile_zone_size / KB);
translate_time_, translate_zone_size_ / KB, compile_zone_size / KB);
}
return SUCCEEDED;
}
CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl() {
CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl(Isolate* isolate) {
// Step 2: Compile and decode the WebAssembly module.
base::ElapsedTimer compile_timer;
compile_timer.Start();
Handle<HeapNumber> uses_bitset =
compilation_info()->isolate()->factory()->NewHeapNumberFromBits(
stdlib_uses_.ToIntegral());
isolate->factory()->NewHeapNumberFromBits(stdlib_uses_.ToIntegral());
wasm::ErrorThrower thrower(compilation_info()->isolate(), "AsmJs::Compile");
wasm::ErrorThrower thrower(isolate, "AsmJs::Compile");
Handle<WasmModuleObject> compiled =
SyncCompileTranslatedAsmJs(
compilation_info()->isolate(), &thrower,
isolate, &thrower,
wasm::ModuleWireBytes(module_->begin(), module_->end()),
parse_info()->script(),
Vector<const byte>(asm_offsets_->begin(), asm_offsets_->size()))
@ -313,24 +295,41 @@ CompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl() {
// The result is a compiled module and serialized standard library uses.
Handle<FixedArray> result =
compilation_info()->isolate()->factory()->NewFixedArray(
kWasmDataEntryCount);
isolate->factory()->NewFixedArray(kWasmDataEntryCount);
result->set(kWasmDataCompiledModule, *compiled);
result->set(kWasmDataUsesBitSet, *uses_bitset);
compilation_info()->SetAsmWasmData(result);
compilation_info()->SetCode(
BUILTIN_CODE(compilation_info()->isolate(), InstantiateAsmJs));
compilation_info()->SetCode(BUILTIN_CODE(isolate, InstantiateAsmJs));
RecordHistograms(isolate);
ReportCompilationSuccess(parse_info()->script(),
compilation_info()->literal()->position(),
translate_time_, compile_time_, module_->size());
return SUCCEEDED;
}
void AsmJsCompilationJob::RecordHistograms(Isolate* isolate) {
Counters* counters = isolate->counters();
counters->asm_wasm_translation_time()->AddSample(
static_cast<int>(translate_time_micro_));
counters->asm_wasm_translation_peak_memory_bytes()->AddSample(
static_cast<int>(translate_zone_size_));
counters->asm_module_size_bytes()->AddSample(module_source_size_);
// translation_throughput is not exact (assumes MB == 1000000). But that is ok
// since the metric is stored in buckets that lose some precision anyways.
int translation_throughput =
translate_time_micro_ != 0
? static_cast<int>(static_cast<int64_t>(module_source_size_) /
translate_time_micro_)
: 0;
counters->asm_wasm_translation_throughput()->AddSample(
translation_throughput);
}
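A quick worked example of the translation_throughput sample recorded above, with made-up figures: a 3,000,000-byte module translated in 1,500,000 microseconds yields 3000000 / 1500000 = 2, i.e. roughly 2 MB/s under the MB == 1000000 approximation the comment mentions. In sketch form:

#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical figures standing in for module_source_size_ and
  // translate_time_micro_ in the job above.
  int module_source_size = 3000000;        // bytes
  int64_t translate_time_micro = 1500000;  // microseconds

  int translation_throughput =
      translate_time_micro != 0
          ? static_cast<int>(static_cast<int64_t>(module_source_size) /
                             translate_time_micro)
          : 0;
  // Bytes per microsecond is roughly megabytes per second when MB == 1e6.
  std::printf("throughput sample: %d\n", translation_throughput);  // 2
  return 0;
}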
CompilationJob* AsmJs::NewCompilationJob(ParseInfo* parse_info,
FunctionLiteral* literal,
Isolate* isolate) {
return new AsmJsCompilationJob(parse_info, literal, isolate);
AccountingAllocator* allocator) {
return new AsmJsCompilationJob(parse_info, literal, allocator);
}
MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,

View File

@ -12,6 +12,7 @@
namespace v8 {
namespace internal {
class AccountingAllocator;
class CompilationInfo;
class CompilationJob;
class FunctionLiteral;
@ -24,7 +25,7 @@ class AsmJs {
public:
static CompilationJob* NewCompilationJob(ParseInfo* parse_info,
FunctionLiteral* literal,
Isolate* isolate);
AccountingAllocator* allocator);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,
Handle<SharedFunctionInfo>,
Handle<FixedArray> wasm_data,

View File

@ -230,7 +230,7 @@ wasm::AsmJsParser::VarInfo* AsmJsParser::GetVarInfo(
}
uint32_t AsmJsParser::VarIndex(VarInfo* info) {
DCHECK(info->kind == VarKind::kGlobal);
DCHECK_EQ(info->kind, VarKind::kGlobal);
return info->index + static_cast<uint32_t>(global_imports_.size());
}
@ -292,6 +292,9 @@ void AsmJsParser::Begin(AsmJsScanner::token_t label) {
void AsmJsParser::Loop(AsmJsScanner::token_t label) {
BareBegin(BlockKind::kLoop, label);
int position = static_cast<int>(scanner_.Position());
DCHECK_EQ(position, scanner_.Position());
current_function_builder_->AddAsmWasmOffset(position, position);
current_function_builder_->EmitWithU8(kExprLoop, kLocalVoid);
}
@ -308,7 +311,7 @@ void AsmJsParser::BareBegin(BlockKind kind, AsmJsScanner::token_t label) {
}
void AsmJsParser::BareEnd() {
DCHECK(block_stack_.size() > 0);
DCHECK_GT(block_stack_.size(), 0);
block_stack_.pop_back();
}
@ -797,7 +800,7 @@ void AsmJsParser::ValidateFunction() {
}
function_info = GetVarInfo(function_name);
if (function_info->type->IsA(AsmType::None())) {
DCHECK(function_info->kind == VarKind::kFunction);
DCHECK_EQ(function_info->kind, VarKind::kFunction);
function_info->type = function_type;
} else if (!function_type->IsA(function_info->type)) {
// TODO(bradnelson): Should IsExactly be used here?
@ -1164,18 +1167,18 @@ void AsmJsParser::DoStatement() {
RECURSE(ValidateStatement());
EXPECT_TOKEN(TOK(while));
End();
// }
// } // end c
EXPECT_TOKEN('(');
RECURSE(Expression(AsmType::Int()));
// if (CONDITION) break a;
// if (!CONDITION) break a;
current_function_builder_->Emit(kExprI32Eqz);
current_function_builder_->EmitWithU8(kExprBrIf, 1);
// continue b;
current_function_builder_->EmitWithU8(kExprBr, 0);
EXPECT_TOKEN(')');
// }
// } // end b
End();
// }
// } // end a
End();
SkipSemicolon();
}
@ -1195,13 +1198,16 @@ void AsmJsParser::ForStatement() {
// a: block {
Begin(pending_label_);
// b: loop {
Loop(pending_label_);
Loop();
// c: block { // but treated like loop so continue works
BareBegin(BlockKind::kLoop, pending_label_);
current_function_builder_->EmitWithU8(kExprBlock, kLocalVoid);
pending_label_ = 0;
if (!Peek(';')) {
// if (CONDITION) break a;
// if (!CONDITION) break a;
RECURSE(Expression(AsmType::Int()));
current_function_builder_->Emit(kExprI32Eqz);
current_function_builder_->EmitWithU8(kExprBrIf, 1);
current_function_builder_->EmitWithU8(kExprBrIf, 2);
}
EXPECT_TOKEN(';');
// Race past INCREMENT
@ -1210,18 +1216,21 @@ void AsmJsParser::ForStatement() {
EXPECT_TOKEN(')');
// BODY
RECURSE(ValidateStatement());
// INCREMENT
// } // end c
End();
// INCREMENT
size_t end_position = scanner_.Position();
scanner_.Seek(increment_position);
if (!Peek(')')) {
RECURSE(Expression(nullptr));
// NOTE: No explicit drop because below break is an implicit drop.
}
// continue b;
current_function_builder_->EmitWithU8(kExprBr, 0);
scanner_.Seek(end_position);
// }
// } // end b
End();
// }
// } // end a
End();
}
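For readers following the block/loop/block nesting in ForStatement above, here is a structured C++ analog of the emitted control flow, with the parser's branch targets noted in comments. This is an explanatory sketch, not generated code:

#include <cstdio>

// Models: for (i = 0; i < 3; i = i + 1) { if (i == 1) continue; use(i); }
int main() {
  int i = 0;                       // INIT
  {                                // a: block (target of "break a", br_if 2)
    while (true) {                 // b: loop  (target of the final br 0)
      {                            // c: block (a `continue` branches out of c)
        if (!(i < 3)) goto end_a;  // "if (!CONDITION) break a;"
        if (i == 1) goto end_c;    // `continue` in BODY leaves block c
        std::printf("%d\n", i);    // BODY
      }
    end_c:
      i = i + 1;                   // INCREMENT runs after block c
      // the emitted "br 0" re-enters loop b; while (true) does that here
    }
  end_a:;
  }
  return 0;  // prints 0 and 2
}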
@ -1392,11 +1401,10 @@ AsmType* AsmJsParser::NumericLiteral() {
if (uvalue <= 0x7fffffff) {
current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
return AsmType::FixNum();
} else if (uvalue <= 0xffffffff) {
} else {
DCHECK_LE(uvalue, 0xffffffff);
current_function_builder_->EmitI32Const(static_cast<int32_t>(uvalue));
return AsmType::Unsigned();
} else {
FAILn("Integer numeric literal out of range.");
}
} else {
FAILn("Expected numeric literal.");
@ -2195,7 +2203,7 @@ AsmType* AsmJsParser::ValidateCall() {
if (return_type->IsA(AsmType::Float())) {
FAILn("Imported function can't be called as float");
}
DCHECK(function_info->import != nullptr);
DCHECK_NOT_NULL(function_info->import);
// TODO(bradnelson): Factor out.
uint32_t index;
auto it = function_info->import->cache.find(sig);

View File

@ -255,15 +255,15 @@ void AsmJsScanner::ConsumeIdentifier(uc32 ch) {
}
}
if (preceding_token_ == '.') {
CHECK(global_count_ < kMaxIdentifierCount);
CHECK_LT(global_count_, kMaxIdentifierCount);
token_ = kGlobalsStart + global_count_++;
property_names_[identifier_string_] = token_;
} else if (in_local_scope_) {
CHECK(local_names_.size() < kMaxIdentifierCount);
CHECK_LT(local_names_.size(), kMaxIdentifierCount);
token_ = kLocalsStart - static_cast<token_t>(local_names_.size());
local_names_[identifier_string_] = token_;
} else {
CHECK(global_count_ < kMaxIdentifierCount);
CHECK_LT(global_count_, kMaxIdentifierCount);
token_ = kGlobalsStart + global_count_++;
global_names_[identifier_string_] = token_;
}

View File

@ -228,8 +228,8 @@ class AsmMinMaxType final : public AsmCallableType {
} // namespace
AsmType* AsmType::MinMaxType(Zone* zone, AsmType* dest, AsmType* src) {
DCHECK(dest->AsValueType() != nullptr);
DCHECK(src->AsValueType() != nullptr);
DCHECK_NOT_NULL(dest->AsValueType());
DCHECK_NOT_NULL(src->AsValueType());
auto* MinMax = new (zone) AsmMinMaxType(dest, src);
return reinterpret_cast<AsmType*>(MinMax);
}
@ -300,7 +300,7 @@ bool AsmOverloadedFunctionType::CanBeInvokedWith(
}
void AsmOverloadedFunctionType::AddOverload(AsmType* overload) {
DCHECK(overload->AsCallableType() != nullptr);
DCHECK_NOT_NULL(overload->AsCallableType());
overloads_.push_back(overload);
}

View File

@ -83,8 +83,8 @@ class AsmValueType {
}
bitset_t Bitset() const {
DCHECK((reinterpret_cast<uintptr_t>(this) & kAsmValueTypeTag) ==
kAsmValueTypeTag);
DCHECK_EQ(reinterpret_cast<uintptr_t>(this) & kAsmValueTypeTag,
kAsmValueTypeTag);
return static_cast<bitset_t>(reinterpret_cast<uintptr_t>(this) &
~kAsmValueTypeTag);
}

View File

@ -163,9 +163,9 @@ AssemblerBase::AssemblerBase(IsolateData isolate_data, void* buffer,
predictable_code_size_(false),
constant_pool_available_(false),
jump_optimization_info_(nullptr) {
own_buffer_ = buffer == NULL;
own_buffer_ = buffer == nullptr;
if (buffer_size == 0) buffer_size = kMinimalBufferSize;
DCHECK(buffer_size > 0);
DCHECK_GT(buffer_size, 0);
if (own_buffer_) buffer = NewArray<byte>(buffer_size);
buffer_ = static_cast<byte*>(buffer);
buffer_size_ = buffer_size;
@ -313,27 +313,23 @@ void RelocInfo::set_global_handle(Isolate* isolate, Address address,
set_embedded_address(isolate, address, icache_flush_mode);
}
Address RelocInfo::wasm_call_address() const {
DCHECK_EQ(rmode_, WASM_CALL);
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_wasm_call_address(Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode) {
DCHECK_EQ(rmode_, WASM_CALL);
Assembler::set_target_address_at(isolate, pc_, constant_pool_, address,
icache_flush_mode);
}
Address RelocInfo::global_handle() const {
DCHECK_EQ(rmode_, WASM_GLOBAL_HANDLE);
return embedded_address();
}
void RelocInfo::update_wasm_global_reference(
Isolate* isolate, Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsWasmGlobalReference(rmode_));
Address updated_reference;
DCHECK_LE(old_base, wasm_global_reference());
updated_reference = new_base + (wasm_global_reference() - old_base);
DCHECK_LE(new_base, updated_reference);
set_embedded_address(isolate, updated_reference, icache_flush_mode);
}
Address RelocInfo::wasm_global_reference() const {
DCHECK(IsWasmGlobalReference(rmode_));
return embedded_address();
}
uint32_t RelocInfo::wasm_function_table_size_reference() const {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
return embedded_size();
@ -354,10 +350,10 @@ void RelocInfo::update_wasm_function_table_size_reference(
void RelocInfo::set_target_address(Isolate* isolate, Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
Assembler::set_target_address_at(isolate, pc_, host_, target,
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != nullptr &&
IsCodeTarget(rmode_)) {
Code* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(host(), this,
@ -372,7 +368,7 @@ uint32_t RelocInfoWriter::WriteLongPCJump(uint32_t pc_delta) {
if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
WriteMode(RelocInfo::PC_JUMP);
uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
DCHECK(pc_jump > 0);
DCHECK_GT(pc_jump, 0);
// Write kChunkBits size chunks of the pc_jump.
for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
byte b = pc_jump & kChunkMask;
@ -428,7 +424,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
byte* begin_pos = pos_;
#endif
DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
DCHECK(rinfo->pc() - last_pc_ >= 0);
DCHECK_GE(rinfo->pc() - last_pc_, 0);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
@ -437,7 +433,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteShortTaggedPC(pc_delta, kEmbeddedObjectTag);
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteShortTaggedPC(pc_delta, kCodeTargetTag);
DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
DCHECK_LE(begin_pos - pos_, RelocInfo::kMaxCallSize);
} else if (rmode == RelocInfo::DEOPT_REASON) {
DCHECK(rinfo->data() < (1 << kBitsPerByte));
WriteShortTaggedPC(pc_delta, kLocatableTag);
@ -448,15 +444,14 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteData(rinfo->data());
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) || RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsWasmProtectedLanding(rmode)) {
RelocInfo::IsDeoptPosition(rmode)) {
WriteIntData(static_cast<int>(rinfo->data()));
}
}
last_pc_ = rinfo->pc();
last_mode_ = rmode;
#ifdef DEBUG
DCHECK(begin_pos - pos_ <= kMaxSize);
DCHECK_LE(begin_pos - pos_, kMaxSize);
#endif
}
@ -536,7 +531,7 @@ void RelocIterator::next() {
return;
}
} else {
DCHECK(tag == kDefaultTag);
DCHECK_EQ(tag, kDefaultTag);
RelocInfo::Mode rmode = GetMode();
if (rmode == RelocInfo::PC_JUMP) {
AdvanceReadLongPCJump();
@ -551,8 +546,7 @@ void RelocIterator::next() {
} else if (RelocInfo::IsConstPool(rmode) ||
RelocInfo::IsVeneerPool(rmode) ||
RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsWasmProtectedLanding(rmode)) {
RelocInfo::IsDeoptPosition(rmode)) {
if (SetMode(rmode)) {
AdvanceReadInt();
return;
@ -571,6 +565,7 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
rinfo_.host_ = code;
rinfo_.pc_ = code->instruction_start();
rinfo_.data_ = 0;
rinfo_.constant_pool_ = code->constant_pool();
// Relocation info is read backwards.
pos_ = code->relocation_start() + code->relocation_size();
end_ = code->relocation_start();
@ -592,6 +587,21 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
next();
}
RelocIterator::RelocIterator(Vector<byte> instructions,
Vector<const byte> reloc_info, Address const_pool,
int mode_mask) {
rinfo_.pc_ = instructions.start();
rinfo_.data_ = 0;
rinfo_.constant_pool_ = const_pool;
// Relocation info is read backwards.
pos_ = reloc_info.start() + reloc_info.size();
end_ = reloc_info.start();
done_ = false;
mode_mask_ = mode_mask;
if (mode_mask_ == 0) pos_ = end_;
next();
}
// -----------------------------------------------------------------------------
// Implementation of RelocInfo
@ -643,14 +653,14 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "veneer pool";
case WASM_CONTEXT_REFERENCE:
return "wasm context reference";
case WASM_GLOBAL_REFERENCE:
return "wasm global value reference";
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
return "wasm function table size reference";
case WASM_PROTECTED_INSTRUCTION_LANDING:
return "wasm protected instruction landing";
case WASM_GLOBAL_HANDLE:
return "global handle";
case WASM_CALL:
return "internal wasm call";
case JS_TO_WASM_CALL:
return "js to wasm call";
case NUMBER_OF_MODES:
case PC_JUMP:
UNREACHABLE();
@ -679,8 +689,7 @@ void RelocInfo::Print(Isolate* isolate, std::ostream& os) { // NOLINT
Code* code = Code::GetCodeFromTargetAddress(target_address());
os << " (" << Code::Kind2String(code->kind()) << ") ("
<< static_cast<const void*>(target_address()) << ")";
} else if (IsRuntimeEntry(rmode_) &&
isolate->deoptimizer_data() != NULL) {
} else if (IsRuntimeEntry(rmode_) && isolate->deoptimizer_data() != nullptr) {
// Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
isolate, target_address(), Deoptimizer::EAGER);
@ -704,7 +713,7 @@ void RelocInfo::Verify(Isolate* isolate) {
case CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
CHECK(addr != NULL);
CHECK_NOT_NULL(addr);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
Object* found = isolate->FindCodeObject(addr);
@ -731,11 +740,10 @@ void RelocInfo::Verify(Isolate* isolate) {
case CONST_POOL:
case VENEER_POOL:
case WASM_CONTEXT_REFERENCE:
case WASM_GLOBAL_REFERENCE:
case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
case WASM_GLOBAL_HANDLE:
case WASM_PROTECTED_INSTRUCTION_LANDING:
// TODO(eholk): make sure the protected instruction is in range.
case WASM_CALL:
case JS_TO_WASM_CALL:
case NONE32:
case NONE64:
break;
@ -773,10 +781,9 @@ ExternalReference::ExternalReference(Address address, Isolate* isolate)
: address_(Redirect(isolate, address)) {}
ExternalReference::ExternalReference(
ApiFunction* fun,
Type type = ExternalReference::BUILTIN_CALL,
Isolate* isolate = NULL)
: address_(Redirect(isolate, fun->address(), type)) {}
ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL,
Isolate* isolate = nullptr)
: address_(Redirect(isolate, fun->address(), type)) {}
ExternalReference::ExternalReference(Runtime::FunctionId id, Isolate* isolate)
: ExternalReference(Runtime::FunctionForId(id), isolate) {}
@ -853,7 +860,7 @@ ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
void ExternalReference::set_redirector(
Isolate* isolate, ExternalReferenceRedirector* redirector) {
// We can't stack them.
DCHECK(isolate->external_reference_redirector() == NULL);
DCHECK_NULL(isolate->external_reference_redirector());
isolate->set_external_reference_redirector(
reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
@ -862,6 +869,10 @@ ExternalReference ExternalReference::stress_deopt_count(Isolate* isolate) {
return ExternalReference(isolate->stress_deopt_count_address());
}
ExternalReference ExternalReference::force_slow_path(Isolate* isolate) {
return ExternalReference(isolate->force_slow_path_address());
}
ExternalReference ExternalReference::new_deoptimizer_function(
Isolate* isolate) {
return ExternalReference(
@ -1028,7 +1039,7 @@ ExternalReference ExternalReference::wasm_clear_thread_in_wasm_flag(
static void f64_mod_wrapper(double* param0, double* param1) {
WriteDoubleValue(param0,
modulo(ReadDoubleValue(param0), ReadDoubleValue(param1)));
Modulo(ReadDoubleValue(param0), ReadDoubleValue(param1)));
}
ExternalReference ExternalReference::f64_mod_wrapper_function(
@ -1077,11 +1088,6 @@ ExternalReference ExternalReference::address_of_regexp_stack_limit(
return ExternalReference(isolate->regexp_stack()->limit_address());
}
ExternalReference ExternalReference::address_of_regexp_dotall_flag(
Isolate* isolate) {
return ExternalReference(&FLAG_harmony_regexp_dotall);
}
ExternalReference ExternalReference::store_buffer_top(Isolate* isolate) {
return ExternalReference(isolate->heap()->store_buffer_top_address());
}
@ -1397,6 +1403,10 @@ ExternalReference ExternalReference::libc_memset_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memset)));
}
ExternalReference ExternalReference::printf_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(std::printf)));
}
template <typename SubjectChar, typename PatternChar>
ExternalReference ExternalReference::search_string_raw(Isolate* isolate) {
auto f = SearchStringRaw<SubjectChar, PatternChar>;
@ -1415,6 +1425,13 @@ ExternalReference ExternalReference::get_or_create_hash_raw(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
}
ExternalReference ExternalReference::jsreceiver_create_identity_hash(
Isolate* isolate) {
typedef Smi* (*CreateIdentityHash)(Isolate * isolate, JSReceiver * key);
CreateIdentityHash f = JSReceiver::CreateIdentityHash;
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f)));
}
ExternalReference
ExternalReference::copy_fast_number_jsarray_elements_to_typed_array(
Isolate* isolate) {
@ -1542,6 +1559,8 @@ double power_double_double(double x, double y) {
return Pow(x, y);
}
double modulo_double_double(double x, double y) { return Modulo(x, y); }
ExternalReference ExternalReference::power_double_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
@ -1551,9 +1570,8 @@ ExternalReference ExternalReference::power_double_double_function(
ExternalReference ExternalReference::mod_two_doubles_operation(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(modulo),
BUILTIN_FP_FP_CALL));
return ExternalReference(Redirect(
isolate, FUNCTION_ADDR(modulo_double_double), BUILTIN_FP_FP_CALL));
}
ExternalReference ExternalReference::debug_last_step_action_address(
@ -1685,7 +1703,7 @@ void ConstantPoolBuilder::EmitSharedEntries(Assembler* assm,
std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
const int entry_size = ConstantPoolEntry::size(type);
int base = emitted_label_.pos();
DCHECK(base > 0);
DCHECK_GT(base, 0);
int shared_end = static_cast<int>(shared_entries.size());
std::vector<ConstantPoolEntry>::iterator shared_it = shared_entries.begin();
for (int i = 0; i < shared_end; i++, shared_it++) {
@ -1713,7 +1731,7 @@ void ConstantPoolBuilder::EmitGroup(Assembler* assm,
std::vector<ConstantPoolEntry>& shared_entries = info.shared_entries;
const int entry_size = ConstantPoolEntry::size(type);
int base = emitted_label_.pos();
DCHECK(base > 0);
DCHECK_GT(base, 0);
int begin;
int end;
@ -1842,7 +1860,7 @@ void SetUpJSCallerSavedCodeData() {
for (int r = 0; r < kNumRegs; r++)
if ((kJSCallerSaved & (1 << r)) != 0) caller_saved_codes[i++] = r;
DCHECK(i == kNumJSCallerSaved);
DCHECK_EQ(i, kNumJSCallerSaved);
}
int JSCallerSavedCode(int n) {

View File

@ -54,6 +54,9 @@ namespace v8 {
class ApiFunction;
namespace internal {
namespace wasm {
class WasmCode;
}
// Forward declarations.
class Isolate;
@ -287,12 +290,12 @@ class CpuFeatures : public AllStatic {
static inline bool SupportsWasmSimd128();
static inline unsigned icache_line_size() {
DCHECK(icache_line_size_ != 0);
DCHECK_NE(icache_line_size_, 0);
return icache_line_size_;
}
static inline unsigned dcache_line_size() {
DCHECK(dcache_line_size_ != 0);
DCHECK_NE(dcache_line_size_, 0);
return dcache_line_size_;
}
@ -364,10 +367,10 @@ class RelocInfo {
// wasm code. Everything after WASM_CONTEXT_REFERENCE (inclusive) is not
// GC'ed.
WASM_CONTEXT_REFERENCE,
WASM_GLOBAL_REFERENCE,
WASM_FUNCTION_TABLE_SIZE_REFERENCE,
WASM_PROTECTED_INSTRUCTION_LANDING,
WASM_GLOBAL_HANDLE,
WASM_CALL,
JS_TO_WASM_CALL,
RUNTIME_ENTRY,
COMMENT,
@ -423,6 +426,7 @@ class RelocInfo {
static inline bool IsRuntimeEntry(Mode mode) {
return mode == RUNTIME_ENTRY;
}
static inline bool IsWasmCall(Mode mode) { return mode == WASM_CALL; }
// Is the relocation mode affected by GC?
static inline bool IsGCRelocMode(Mode mode) {
return mode <= LAST_GCED_ENUM;
@ -460,9 +464,6 @@ class RelocInfo {
static inline bool IsWasmContextReference(Mode mode) {
return mode == WASM_CONTEXT_REFERENCE;
}
static inline bool IsWasmGlobalReference(Mode mode) {
return mode == WASM_GLOBAL_REFERENCE;
}
static inline bool IsWasmFunctionTableSizeReference(Mode mode) {
return mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
}
@ -473,11 +474,8 @@ class RelocInfo {
return IsWasmFunctionTableSizeReference(mode);
}
static inline bool IsWasmPtrReference(Mode mode) {
return mode == WASM_CONTEXT_REFERENCE || mode == WASM_GLOBAL_REFERENCE ||
mode == WASM_GLOBAL_HANDLE;
}
static inline bool IsWasmProtectedLanding(Mode mode) {
return mode == WASM_PROTECTED_INSTRUCTION_LANDING;
return mode == WASM_CONTEXT_REFERENCE || mode == WASM_GLOBAL_HANDLE ||
mode == WASM_CALL || mode == JS_TO_WASM_CALL;
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
@ -488,7 +486,6 @@ class RelocInfo {
Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; }
Code* host() const { return host_; }
void set_host(Code* host) { host_ = host; }
// Apply a relocation by delta bytes. When the code object is moved, PC
// relative addresses have to be updated as well as absolute addresses
@ -506,17 +503,14 @@ class RelocInfo {
bool IsInConstantPool();
Address wasm_context_reference() const;
Address wasm_global_reference() const;
uint32_t wasm_function_table_size_reference() const;
uint32_t wasm_memory_size_reference() const;
Address global_handle() const;
Address js_to_wasm_address() const;
Address wasm_call_address() const;
void set_wasm_context_reference(
Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_global_reference(
Isolate* isolate, Address old_base, Address new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void update_wasm_function_table_size_reference(
Isolate* isolate, uint32_t old_base, uint32_t new_base,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
@ -528,6 +522,12 @@ class RelocInfo {
void set_global_handle(
Isolate* isolate, Address address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_wasm_call_address(
Isolate*, Address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
void set_js_to_wasm_address(
Isolate*, Address,
ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
// this relocation applies to;
// can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@ -625,7 +625,11 @@ class RelocInfo {
byte* pc_;
Mode rmode_;
intptr_t data_;
// TODO(mtrofin): try remove host_, if all we need is the constant_pool_ or
// other few attributes, like start address, etc. This is so that we can reuse
// RelocInfo for WasmCode without having a modal design.
Code* host_;
Address constant_pool_ = nullptr;
friend class RelocIterator;
};
@ -634,7 +638,7 @@ class RelocInfo {
// lower addresses.
class RelocInfoWriter BASE_EMBEDDED {
public:
RelocInfoWriter() : pos_(NULL), last_pc_(NULL) {}
RelocInfoWriter() : pos_(nullptr), last_pc_(nullptr) {}
RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc) {}
byte* pos() const { return pos_; }
@ -691,6 +695,11 @@ class RelocIterator: public Malloced {
// iteration iff bit k of mode_mask is set.
explicit RelocIterator(Code* code, int mode_mask = -1);
explicit RelocIterator(const CodeDesc& desc, int mode_mask = -1);
explicit RelocIterator(Vector<byte> instructions,
Vector<const byte> reloc_info, Address const_pool,
int mode_mask = -1);
RelocIterator(RelocIterator&&) = default;
RelocIterator& operator=(RelocIterator&&) = default;
// Iteration
bool done() const { return done_; }
@ -725,8 +734,8 @@ class RelocIterator: public Malloced {
return (mode_mask_ & (1 << mode)) ? (rinfo_.rmode_ = mode, true) : false;
}
byte* pos_;
byte* end_;
const byte* pos_;
const byte* end_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
@ -799,7 +808,7 @@ class ExternalReference BASE_EMBEDDED {
typedef void* ExternalReferenceRedirector(Isolate* isolate, void* original,
Type type);
ExternalReference() : address_(NULL) {}
ExternalReference() : address_(nullptr) {}
ExternalReference(Address address, Isolate* isolate);
@ -898,9 +907,6 @@ class ExternalReference BASE_EMBEDDED {
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);
// Direct access to FLAG_harmony_regexp_dotall.
static ExternalReference address_of_regexp_dotall_flag(Isolate* isolate);
// Static variables for RegExp.
static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
static ExternalReference address_of_regexp_stack_memory_address(
@ -969,6 +975,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference libc_memmove_function(Isolate* isolate);
static ExternalReference libc_memset_function(Isolate* isolate);
static ExternalReference printf_function(Isolate* isolate);
static ExternalReference try_internalize_string_function(Isolate* isolate);
static ExternalReference check_object_type(Isolate* isolate);
@ -984,6 +992,7 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference orderedhashmap_gethash_raw(Isolate* isolate);
static ExternalReference get_or_create_hash_raw(Isolate* isolate);
static ExternalReference jsreceiver_create_identity_hash(Isolate* isolate);
static ExternalReference copy_fast_number_jsarray_elements_to_typed_array(
Isolate* isolate);
@ -1045,6 +1054,8 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference stress_deopt_count(Isolate* isolate);
static ExternalReference force_slow_path(Isolate* isolate);
static ExternalReference fixed_typed_array_base_data_offset();
private:
@ -1058,8 +1069,9 @@ class ExternalReference BASE_EMBEDDED {
reinterpret_cast<ExternalReferenceRedirector*>(
isolate->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
void* answer =
(redirector == NULL) ? address : (*redirector)(isolate, address, type);
void* answer = (redirector == nullptr)
? address
: (*redirector)(isolate, address, type);
return answer;
}
@ -1110,11 +1122,11 @@ class ConstantPoolEntry {
DCHECK(is_merged());
}
int offset(void) const {
DCHECK(merged_index_ >= 0);
DCHECK_GE(merged_index_, 0);
return merged_index_;
}
void set_offset(int offset) {
DCHECK(offset >= 0);
DCHECK_GE(offset, 0);
merged_index_ = offset;
}
intptr_t value() const { return value_; }
@ -1263,6 +1275,10 @@ class HeapObjectRequest {
// and best performance in optimized code.
template <typename SubType, int kAfterLastRegister>
class RegisterBase {
// Internal enum class; used for calling constexpr methods, where we need to
// pass an integral type as template parameter.
enum class RegisterCode : int { kFirst = 0, kAfterLast = kAfterLastRegister };
public:
static constexpr int kCode_no_reg = -1;
static constexpr int kNumRegisters = kAfterLastRegister;
@ -1275,12 +1291,34 @@ class RegisterBase {
return SubType{code};
}
constexpr operator RegisterCode() const {
return static_cast<RegisterCode>(reg_code_);
}
template <RegisterCode reg_code>
static constexpr int code() {
static_assert(
reg_code >= RegisterCode::kFirst && reg_code < RegisterCode::kAfterLast,
"must be valid reg");
return static_cast<int>(reg_code);
}
template <RegisterCode reg_code>
static constexpr int bit() {
return 1 << code<reg_code>();
}
static SubType from_code(int code) {
DCHECK_LE(0, code);
DCHECK_GT(kNumRegisters, code);
return SubType{code};
}
template <RegisterCode... reg_codes>
static constexpr RegList ListOf() {
return CombineRegLists(RegisterBase::bit<reg_codes>()...);
}
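The new templated code<>/bit<>/ListOf<> helpers above build a RegList at compile time from register codes. A self-contained mini version to illustrate the fold; the names MiniRegisterBase and Combine are illustrative stand-ins (the real header uses a RegisterCode enum class and CombineRegLists):

#include <cstdint>
#include <cstdio>

using RegList = uint32_t;

// Stand-in for CombineRegLists: OR together any number of register masks.
constexpr RegList Combine() { return 0; }
template <typename... Rest>
constexpr RegList Combine(RegList head, Rest... rest) {
  return head | Combine(rest...);
}

// Minimal analog of the new RegisterBase helpers, with plain int codes.
template <int kNumRegisters>
struct MiniRegisterBase {
  template <int code>
  static constexpr RegList bit() {
    static_assert(code >= 0 && code < kNumRegisters, "must be valid reg");
    return RegList{1} << code;
  }
  template <int... codes>
  static constexpr RegList ListOf() {
    return Combine(bit<codes>()...);
  }
};

int main() {
  using R = MiniRegisterBase<16>;
  constexpr RegList saved = R::ListOf<0, 1, 5>();  // bits 0, 1 and 5
  std::printf("0x%x\n", saved);                    // prints 0x23
  return 0;
}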
bool is_valid() const { return reg_code_ != kCode_no_reg; }
int code() const {

View File

@ -71,7 +71,7 @@ class PerThreadAssertData final {
template <PerThreadAssertType kType, bool kAllow>
PerThreadAssertScope<kType, kAllow>::PerThreadAssertScope()
: data_(PerThreadAssertData::GetCurrent()) {
if (data_ == NULL) {
if (data_ == nullptr) {
data_ = new PerThreadAssertData();
PerThreadAssertData::SetCurrent(data_);
}
@ -92,7 +92,7 @@ void PerThreadAssertScope<kType, kAllow>::Release() {
DCHECK_NOT_NULL(data_);
data_->Set(kType, old_state_);
if (data_->DecrementLevel()) {
PerThreadAssertData::SetCurrent(NULL);
PerThreadAssertData::SetCurrent(nullptr);
delete data_;
}
data_ = nullptr;
@ -102,7 +102,7 @@ void PerThreadAssertScope<kType, kAllow>::Release() {
template <PerThreadAssertType kType, bool kAllow>
bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
PerThreadAssertData* data = PerThreadAssertData::GetCurrent();
return data == NULL || data->Get(kType);
return data == nullptr || data->Get(kType);
}

View File

@ -1,410 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ast/ast-expression-rewriter.h"
#include "src/ast/ast.h"
#include "src/objects-inl.h"
namespace v8 {
namespace internal {
// ----------------------------------------------------------------------------
// Implementation of AstExpressionRewriter
// The AST is traversed but no actual rewriting takes place, unless the
// Visit methods are overridden in subclasses.
#define REWRITE_THIS(node) \
do { \
if (!RewriteExpression(node)) return; \
} while (false)
#define NOTHING() DCHECK_NULL(replacement_)
void AstExpressionRewriter::VisitDeclarations(Declaration::List* declarations) {
for (Declaration::List::Iterator it = declarations->begin();
it != declarations->end(); ++it) {
AST_REWRITE(Declaration, *it, it = replacement);
}
}
void AstExpressionRewriter::VisitStatements(ZoneList<Statement*>* statements) {
for (int i = 0; i < statements->length(); i++) {
AST_REWRITE_LIST_ELEMENT(Statement, statements, i);
if (statements->at(i)->IsJump()) break;
}
}
void AstExpressionRewriter::VisitExpressions(
ZoneList<Expression*>* expressions) {
for (int i = 0; i < expressions->length(); i++) {
// The variable statement visiting code may pass NULL expressions
// to this code. Maybe this should be handled by introducing an
// undefined expression or literal? Revisit this code if this
// changes
if (expressions->at(i) != nullptr) {
AST_REWRITE_LIST_ELEMENT(Expression, expressions, i);
}
}
}
void AstExpressionRewriter::VisitVariableDeclaration(
VariableDeclaration* node) {
// Not visiting `proxy_`.
NOTHING();
}
void AstExpressionRewriter::VisitFunctionDeclaration(
FunctionDeclaration* node) {
// Not visiting `proxy_`.
AST_REWRITE_PROPERTY(FunctionLiteral, node, fun);
}
void AstExpressionRewriter::VisitBlock(Block* node) {
VisitStatements(node->statements());
}
void AstExpressionRewriter::VisitExpressionStatement(
ExpressionStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitEmptyStatement(EmptyStatement* node) {
NOTHING();
}
void AstExpressionRewriter::VisitSloppyBlockFunctionStatement(
SloppyBlockFunctionStatement* node) {
AST_REWRITE_PROPERTY(Statement, node, statement);
}
void AstExpressionRewriter::VisitIfStatement(IfStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, condition);
AST_REWRITE_PROPERTY(Statement, node, then_statement);
AST_REWRITE_PROPERTY(Statement, node, else_statement);
}
void AstExpressionRewriter::VisitContinueStatement(ContinueStatement* node) {
NOTHING();
}
void AstExpressionRewriter::VisitBreakStatement(BreakStatement* node) {
NOTHING();
}
void AstExpressionRewriter::VisitReturnStatement(ReturnStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitWithStatement(WithStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, expression);
AST_REWRITE_PROPERTY(Statement, node, statement);
}
void AstExpressionRewriter::VisitSwitchStatement(SwitchStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, tag);
for (CaseClause* clause : *node->cases()) {
if (!clause->is_default()) {
AST_REWRITE_PROPERTY(Expression, clause, label);
}
VisitStatements(clause->statements());
}
}
void AstExpressionRewriter::VisitDoWhileStatement(DoWhileStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, cond);
AST_REWRITE_PROPERTY(Statement, node, body);
}
void AstExpressionRewriter::VisitWhileStatement(WhileStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, cond);
AST_REWRITE_PROPERTY(Statement, node, body);
}
void AstExpressionRewriter::VisitForStatement(ForStatement* node) {
if (node->init() != nullptr) {
AST_REWRITE_PROPERTY(Statement, node, init);
}
if (node->cond() != nullptr) {
AST_REWRITE_PROPERTY(Expression, node, cond);
}
if (node->next() != nullptr) {
AST_REWRITE_PROPERTY(Statement, node, next);
}
AST_REWRITE_PROPERTY(Statement, node, body);
}
void AstExpressionRewriter::VisitForInStatement(ForInStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, each);
AST_REWRITE_PROPERTY(Expression, node, subject);
AST_REWRITE_PROPERTY(Statement, node, body);
}
void AstExpressionRewriter::VisitForOfStatement(ForOfStatement* node) {
AST_REWRITE_PROPERTY(Expression, node, assign_iterator);
AST_REWRITE_PROPERTY(Expression, node, next_result);
AST_REWRITE_PROPERTY(Expression, node, result_done);
AST_REWRITE_PROPERTY(Expression, node, assign_each);
AST_REWRITE_PROPERTY(Statement, node, body);
}
void AstExpressionRewriter::VisitTryCatchStatement(TryCatchStatement* node) {
AST_REWRITE_PROPERTY(Block, node, try_block);
// Not visiting the variable.
AST_REWRITE_PROPERTY(Block, node, catch_block);
}
void AstExpressionRewriter::VisitTryFinallyStatement(
TryFinallyStatement* node) {
AST_REWRITE_PROPERTY(Block, node, try_block);
AST_REWRITE_PROPERTY(Block, node, finally_block);
}
void AstExpressionRewriter::VisitDebuggerStatement(DebuggerStatement* node) {
NOTHING();
}
void AstExpressionRewriter::VisitFunctionLiteral(FunctionLiteral* node) {
REWRITE_THIS(node);
VisitDeclarations(node->scope()->declarations());
ZoneList<Statement*>* body = node->body();
if (body != nullptr) VisitStatements(body);
}
void AstExpressionRewriter::VisitClassLiteral(ClassLiteral* node) {
REWRITE_THIS(node);
// Not visiting `class_variable_proxy_`.
if (node->extends() != nullptr) {
AST_REWRITE_PROPERTY(Expression, node, extends);
}
AST_REWRITE_PROPERTY(FunctionLiteral, node, constructor);
ZoneList<typename ClassLiteral::Property*>* properties = node->properties();
for (int i = 0; i < properties->length(); i++) {
VisitLiteralProperty(properties->at(i));
}
}
void AstExpressionRewriter::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
REWRITE_THIS(node);
NOTHING();
}
void AstExpressionRewriter::VisitConditional(Conditional* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, condition);
AST_REWRITE_PROPERTY(Expression, node, then_expression);
AST_REWRITE_PROPERTY(Expression, node, else_expression);
}
void AstExpressionRewriter::VisitVariableProxy(VariableProxy* node) {
REWRITE_THIS(node);
NOTHING();
}
void AstExpressionRewriter::VisitLiteral(Literal* node) {
REWRITE_THIS(node);
NOTHING();
}
void AstExpressionRewriter::VisitRegExpLiteral(RegExpLiteral* node) {
REWRITE_THIS(node);
NOTHING();
}
void AstExpressionRewriter::VisitObjectLiteral(ObjectLiteral* node) {
REWRITE_THIS(node);
ZoneList<typename ObjectLiteral::Property*>* properties = node->properties();
for (int i = 0; i < properties->length(); i++) {
VisitLiteralProperty(properties->at(i));
}
}
void AstExpressionRewriter::VisitLiteralProperty(LiteralProperty* property) {
if (property == nullptr) return;
AST_REWRITE_PROPERTY(Expression, property, key);
AST_REWRITE_PROPERTY(Expression, property, value);
}
void AstExpressionRewriter::VisitArrayLiteral(ArrayLiteral* node) {
REWRITE_THIS(node);
VisitExpressions(node->values());
}
void AstExpressionRewriter::VisitAssignment(Assignment* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, target);
AST_REWRITE_PROPERTY(Expression, node, value);
}
void AstExpressionRewriter::VisitCompoundAssignment(CompoundAssignment* node) {
VisitAssignment(node);
}
void AstExpressionRewriter::VisitYield(Yield* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitYieldStar(YieldStar* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitAwait(Await* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitThrow(Throw* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, exception);
}
void AstExpressionRewriter::VisitProperty(Property* node) {
REWRITE_THIS(node);
if (node == nullptr) return;
AST_REWRITE_PROPERTY(Expression, node, obj);
AST_REWRITE_PROPERTY(Expression, node, key);
}
void AstExpressionRewriter::VisitCall(Call* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
VisitExpressions(node->arguments());
}
void AstExpressionRewriter::VisitCallNew(CallNew* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
VisitExpressions(node->arguments());
}
void AstExpressionRewriter::VisitCallRuntime(CallRuntime* node) {
REWRITE_THIS(node);
VisitExpressions(node->arguments());
}
void AstExpressionRewriter::VisitUnaryOperation(UnaryOperation* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitCountOperation(CountOperation* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitBinaryOperation(BinaryOperation* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, left);
AST_REWRITE_PROPERTY(Expression, node, right);
}
void AstExpressionRewriter::VisitCompareOperation(CompareOperation* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, left);
AST_REWRITE_PROPERTY(Expression, node, right);
}
void AstExpressionRewriter::VisitSpread(Spread* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, expression);
}
void AstExpressionRewriter::VisitThisFunction(ThisFunction* node) {
REWRITE_THIS(node);
NOTHING();
}
void AstExpressionRewriter::VisitSuperPropertyReference(
SuperPropertyReference* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(VariableProxy, node, this_var);
AST_REWRITE_PROPERTY(Expression, node, home_object);
}
void AstExpressionRewriter::VisitSuperCallReference(SuperCallReference* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(VariableProxy, node, this_var);
AST_REWRITE_PROPERTY(VariableProxy, node, new_target_var);
AST_REWRITE_PROPERTY(VariableProxy, node, this_function_var);
}
void AstExpressionRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
NOTHING();
}
void AstExpressionRewriter::VisitGetIterator(GetIterator* node) {
AST_REWRITE_PROPERTY(Expression, node, iterable);
}
void AstExpressionRewriter::VisitGetTemplateObject(GetTemplateObject* node) {
NOTHING();
}
void AstExpressionRewriter::VisitImportCallExpression(
ImportCallExpression* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Expression, node, argument);
}
void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
REWRITE_THIS(node);
AST_REWRITE_PROPERTY(Block, node, block);
AST_REWRITE_PROPERTY(VariableProxy, node, result);
}
void AstExpressionRewriter::VisitRewritableExpression(
RewritableExpression* node) {
REWRITE_THIS(node);
AST_REWRITE(Expression, node->expression(), node->Rewrite(replacement));
}
} // namespace internal
} // namespace v8


@ -1,53 +0,0 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_AST_EXPRESSION_REWRITER_H_
#define V8_AST_AST_EXPRESSION_REWRITER_H_
#include "src/allocation.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
#include "src/zone/zone.h"
namespace v8 {
namespace internal {
// A rewriting Visitor over a CompilationInfo's AST that invokes
// VisitExpression on each expression node.
// This AstVisitor is not final and provides the AstVisitor methods as virtual
// methods so they can be specialized by subclasses.
class AstExpressionRewriter : public AstVisitor<AstExpressionRewriter> {
public:
explicit AstExpressionRewriter(Isolate* isolate) {
InitializeAstRewriter(isolate);
}
explicit AstExpressionRewriter(uintptr_t stack_limit) {
InitializeAstRewriter(stack_limit);
}
virtual ~AstExpressionRewriter() {}
virtual void VisitDeclarations(Declaration::List* declarations);
virtual void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitExpressions(ZoneList<Expression*>* expressions);
virtual void VisitLiteralProperty(LiteralProperty* property);
protected:
virtual bool RewriteExpression(Expression* expr) = 0;
private:
DEFINE_AST_REWRITER_SUBCLASS_MEMBERS();
#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
DISALLOW_COPY_AND_ASSIGN(AstExpressionRewriter);
};
} // namespace internal
} // namespace v8
#endif // V8_AST_AST_EXPRESSION_REWRITER_H_
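For context, the header removed above was designed to be subclassed: the only hook a concrete rewriter had to supply was the pure virtual RewriteExpression(). The sketch below is illustrative only — the class name CountEverything is hypothetical, the class itself is deleted by this commit, and the exact meaning of the boolean return value is assumed (taken here to mean "no replacement, keep traversing") rather than shown in this diff.

// Minimal sketch of a concrete rewriter built against the pre-removal header.
// Only compiles inside V8's source tree; everything beyond the override of
// RewriteExpression() is an assumption for illustration.
#include "src/ast/ast-expression-rewriter.h"

namespace v8 {
namespace internal {

class CountEverything final : public AstExpressionRewriter {
 public:
  explicit CountEverything(uintptr_t stack_limit)
      : AstExpressionRewriter(stack_limit) {}

  int seen() const { return seen_; }

 protected:
  // Invoked for every expression node the base class walks. Returning true is
  // assumed to mean "do not replace this node, continue the traversal".
  bool RewriteExpression(Expression* expr) override {
    ++seen_;
    return true;
  }

 private:
  int seen_ = 0;
};

}  // namespace internal
}  // namespace v8

A caller would construct it with the isolate's stack limit and then Visit() a FunctionLiteral, in the same way the other AST visitors in this diff drive their traversals.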


@ -15,16 +15,11 @@ namespace internal {
class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
public:
AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
Compiler::EagerInnerFunctionLiterals* eager_literals,
bool collect_type_profile = false)
Compiler::EagerInnerFunctionLiterals* eager_literals)
: zone_(zone),
eager_literals_(eager_literals),
suspend_count_(0),
properties_(zone),
language_mode_(SLOPPY),
slot_cache_(zone),
dont_optimize_reason_(kNoReason),
collect_type_profile_(collect_type_profile) {
dont_optimize_reason_(kNoReason) {
InitializeAstVisitor(stack_limit);
}
@ -36,10 +31,6 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
AST_NODE_LIST(DEFINE_VISIT)
#undef DEFINE_VISIT
void VisitVariableProxy(VariableProxy* node, TypeofMode typeof_mode);
void VisitVariableProxyReference(VariableProxy* node);
void VisitPropertyReference(Property* node);
void VisitReference(Expression* expr);
void VisitSuspend(Suspend* node);
void VisitStatementsAndDeclarations(Block* node);
@ -52,25 +43,6 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
dont_optimize_reason_ = reason;
}
template <typename Node>
void ReserveFeedbackSlots(Node* node) {
node->AssignFeedbackSlots(properties_.get_spec(), language_mode_,
function_kind_, &slot_cache_);
}
class LanguageModeScope {
public:
LanguageModeScope(AstNumberingVisitor* visitor, LanguageMode language_mode)
: visitor_(visitor), outer_language_mode_(visitor->language_mode_) {
visitor_->language_mode_ = language_mode;
}
~LanguageModeScope() { visitor_->language_mode_ = outer_language_mode_; }
private:
AstNumberingVisitor* visitor_;
LanguageMode outer_language_mode_;
};
BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
Zone* zone() const { return zone_; }
@ -78,105 +50,72 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
Zone* zone_;
Compiler::EagerInnerFunctionLiterals* eager_literals_;
int suspend_count_;
AstProperties properties_;
LanguageMode language_mode_;
FunctionKind function_kind_;
// The slot cache allows us to reuse certain feedback slots.
FeedbackSlotCache slot_cache_;
BailoutReason dont_optimize_reason_;
bool collect_type_profile_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
};
void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
VisitVariableProxy(node->proxy());
}
void AstNumberingVisitor::VisitEmptyStatement(EmptyStatement* node) {
}
void AstNumberingVisitor::VisitSloppyBlockFunctionStatement(
SloppyBlockFunctionStatement* node) {
Visit(node->statement());
}
void AstNumberingVisitor::VisitContinueStatement(ContinueStatement* node) {
}
void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
}
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
}
void AstNumberingVisitor::VisitNativeFunctionLiteral(
NativeFunctionLiteral* node) {
DisableOptimization(kNativeFunctionLiteral);
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitDoExpression(DoExpression* node) {
Visit(node->block());
Visit(node->result());
}
void AstNumberingVisitor::VisitLiteral(Literal* node) {
}
void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node,
TypeofMode typeof_mode) {
VisitVariableProxyReference(node);
node->AssignFeedbackSlots(properties_.get_spec(), typeof_mode, &slot_cache_);
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
VisitVariableProxy(node, NOT_INSIDE_TYPEOF);
}
void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
}
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
Visit(node->this_var());
Visit(node->home_object());
}
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
Visit(node->this_var());
Visit(node->new_target_var());
Visit(node->this_function_var());
}
void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
Visit(node->expression());
}
void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
Visit(node->expression());
}
@ -196,7 +135,6 @@ void AstNumberingVisitor::VisitYieldStar(YieldStar* node) {
node->set_await_delegated_iterator_output_suspend_id(suspend_count_++);
}
Visit(node->expression());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitAwait(Await* node) { VisitSuspend(node); }
@ -205,32 +143,16 @@ void AstNumberingVisitor::VisitThrow(Throw* node) {
Visit(node->exception());
}
void AstNumberingVisitor::VisitUnaryOperation(UnaryOperation* node) {
if ((node->op() == Token::TYPEOF) && node->expression()->IsVariableProxy()) {
VariableProxy* proxy = node->expression()->AsVariableProxy();
VisitVariableProxy(proxy, INSIDE_TYPEOF);
} else {
Visit(node->expression());
}
ReserveFeedbackSlots(node);
Visit(node->expression());
}
void AstNumberingVisitor::VisitCountOperation(CountOperation* node) {
Visit(node->expression());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitBlock(Block* node) {
Scope* scope = node->scope();
if (scope != nullptr) {
LanguageModeScope language_mode_scope(this, scope->language_mode());
VisitStatementsAndDeclarations(node);
} else {
VisitStatementsAndDeclarations(node);
}
VisitStatementsAndDeclarations(node);
}
void AstNumberingVisitor::VisitStatementsAndDeclarations(Block* node) {
@ -245,18 +167,15 @@ void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
VisitFunctionLiteral(node->fun());
}
void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
VisitArguments(node->arguments());
}
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
Visit(node->expression());
Visit(node->statement());
}
void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
node->set_first_suspend_id(suspend_count_);
Visit(node->body());
@ -264,7 +183,6 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
node->set_first_suspend_id(suspend_count_);
Visit(node->cond());
@ -272,46 +190,25 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
Visit(node->try_block());
Visit(node->catch_block());
}
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
Visit(node->try_block());
Visit(node->finally_block());
}
void AstNumberingVisitor::VisitPropertyReference(Property* node) {
void AstNumberingVisitor::VisitProperty(Property* node) {
Visit(node->key());
Visit(node->obj());
}
void AstNumberingVisitor::VisitReference(Expression* expr) {
DCHECK(expr->IsProperty() || expr->IsVariableProxy());
if (expr->IsProperty()) {
VisitPropertyReference(expr->AsProperty());
} else {
VisitVariableProxyReference(expr->AsVariableProxy());
}
}
void AstNumberingVisitor::VisitProperty(Property* node) {
VisitPropertyReference(node);
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitAssignment(Assignment* node) {
VisitReference(node->target());
Visit(node->target());
Visit(node->value());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitCompoundAssignment(CompoundAssignment* node) {
@ -322,14 +219,18 @@ void AstNumberingVisitor::VisitCompoundAssignment(CompoundAssignment* node) {
void AstNumberingVisitor::VisitBinaryOperation(BinaryOperation* node) {
Visit(node->left());
Visit(node->right());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitNaryOperation(NaryOperation* node) {
Visit(node->first());
for (size_t i = 0; i < node->subsequent_length(); ++i) {
Visit(node->subsequent(i));
}
}
void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
Visit(node->left());
Visit(node->right());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitSpread(Spread* node) {
@ -342,7 +243,6 @@ void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
Visit(node->iterable());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitGetTemplateObject(GetTemplateObject* node) {}
@ -358,10 +258,8 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
Visit(node->each());
Visit(node->body());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
Visit(node->assign_iterator()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
@ -372,14 +270,12 @@ void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
void AstNumberingVisitor::VisitConditional(Conditional* node) {
Visit(node->condition());
Visit(node->then_expression());
Visit(node->else_expression());
}
void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
Visit(node->condition());
Visit(node->then_statement());
@ -388,37 +284,43 @@ void AstNumberingVisitor::VisitIfStatement(IfStatement* node) {
}
}
void AstNumberingVisitor::VisitSwitchStatement(SwitchStatement* node) {
Visit(node->tag());
for (CaseClause* clause : *node->cases()) {
if (!clause->is_default()) Visit(clause->label());
VisitStatements(clause->statements());
ReserveFeedbackSlots(clause);
}
}
void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
if (node->init() != NULL) Visit(node->init()); // Not part of loop.
if (node->init() != nullptr) Visit(node->init()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
if (node->cond() != NULL) Visit(node->cond());
if (node->next() != NULL) Visit(node->next());
if (node->cond() != nullptr) Visit(node->cond());
if (node->next() != nullptr) Visit(node->next());
Visit(node->body());
node->set_suspend_count(suspend_count_ - node->first_suspend_id());
}
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
LanguageModeScope language_mode_scope(this, STRICT);
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
if (node->static_fields_initializer() != nullptr) {
Visit(node->static_fields_initializer());
}
if (node->instance_fields_initializer_function() != nullptr) {
Visit(node->instance_fields_initializer_function());
}
for (int i = 0; i < node->properties()->length(); i++) {
VisitLiteralProperty(node->properties()->at(i));
}
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitInitializeClassFieldsStatement(
InitializeClassFieldsStatement* node) {
for (int i = 0; i < node->fields()->length(); i++) {
VisitLiteralProperty(node->fields()->at(i));
}
}
void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
for (int i = 0; i < node->properties()->length(); i++) {
@ -429,7 +331,6 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
// is shadowed by a later occurrence of the same key. For the
// marked expressions, no store code is emitted.
node->CalculateEmitStore(zone_);
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
@ -442,26 +343,20 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
Visit(node->values()->at(i));
}
node->InitDepthAndFlags();
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitCall(Call* node) {
ReserveFeedbackSlots(node);
Visit(node->expression());
VisitArguments(node->arguments());
}
void AstNumberingVisitor::VisitCallNew(CallNew* node) {
ReserveFeedbackSlots(node);
Visit(node->expression());
VisitArguments(node->arguments());
}
void AstNumberingVisitor::VisitStatements(ZoneList<Statement*>* statements) {
if (statements == NULL) return;
if (statements == nullptr) return;
for (int i = 0; i < statements->length(); i++) {
Visit(statements->at(i));
if (statements->at(i)->IsJump()) break;
@ -472,14 +367,12 @@ void AstNumberingVisitor::VisitDeclarations(Declaration::List* decls) {
for (Declaration* decl : *decls) Visit(decl);
}
void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
for (int i = 0; i < arguments->length(); i++) {
Visit(arguments->at(i));
}
}
void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
if (node->ShouldEagerCompile()) {
if (eager_literals_) {
@ -494,30 +387,21 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
return;
}
}
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitRewritableExpression(
RewritableExpression* node) {
Visit(node->expression());
}
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DeclarationScope* scope = node->scope();
DCHECK(!scope->HasBeenRemoved());
function_kind_ = node->kind();
LanguageModeScope language_mode_scope(this, node->language_mode());
if (collect_type_profile_) {
properties_.get_spec()->AddTypeProfileSlot();
}
VisitDeclarations(scope->declarations());
VisitStatements(node->body());
node->set_ast_properties(&properties_);
node->set_dont_optimize_reason(dont_optimize_reason());
node->set_suspend_count(suspend_count_);
@ -526,14 +410,12 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
bool AstNumbering::Renumber(
uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
Compiler::EagerInnerFunctionLiterals* eager_literals,
bool collect_type_profile) {
Compiler::EagerInnerFunctionLiterals* eager_literals) {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
AstNumberingVisitor visitor(stack_limit, zone, eager_literals,
collect_type_profile);
AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
return visitor.Renumber(function);
}
} // namespace internal


@ -22,13 +22,12 @@ template <typename T>
class ZoneVector;
namespace AstNumbering {
// Assign type feedback IDs, bailout IDs, and generator suspend IDs to an AST
// node tree; perform catch prediction for TryStatements. If |eager_literals| is
// non-null, adds any eager inner literal functions into it.
// Assign bailout IDs and generator suspend IDs to an AST node tree; perform
// catch prediction for TryStatements. If |eager_literals| is non-null, adds any
// eager inner literal functions into it.
bool Renumber(
uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals,
bool collect_type_profile = false);
ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
}
// Some details on suspend IDs


@ -30,12 +30,14 @@ struct SourceRange {
// The list of ast node kinds that have associated source ranges. Note that this
// macro is not undefined at the end of this file.
#define AST_SOURCE_RANGE_LIST(V) \
V(BinaryOperation) \
V(Block) \
V(CaseClause) \
V(Conditional) \
V(IfStatement) \
V(IterationStatement) \
V(JumpStatement) \
V(NaryOperation) \
V(Suspend) \
V(SwitchStatement) \
V(Throw) \
@ -48,6 +50,7 @@ enum class SourceRangeKind {
kContinuation,
kElse,
kFinally,
kRight,
kThen,
};
@ -57,13 +60,27 @@ class AstNodeSourceRanges : public ZoneObject {
virtual SourceRange GetRange(SourceRangeKind kind) = 0;
};
class BinaryOperationSourceRanges final : public AstNodeSourceRanges {
public:
explicit BinaryOperationSourceRanges(const SourceRange& right_range)
: right_range_(right_range) {}
SourceRange GetRange(SourceRangeKind kind) {
DCHECK_EQ(kind, SourceRangeKind::kRight);
return right_range_;
}
private:
SourceRange right_range_;
};
class ContinuationSourceRanges : public AstNodeSourceRanges {
public:
explicit ContinuationSourceRanges(int32_t continuation_position)
: continuation_position_(continuation_position) {}
SourceRange GetRange(SourceRangeKind kind) {
DCHECK(kind == SourceRangeKind::kContinuation);
DCHECK_EQ(kind, SourceRangeKind::kContinuation);
return SourceRange::OpenEnded(continuation_position_);
}
@ -83,7 +100,7 @@ class CaseClauseSourceRanges final : public AstNodeSourceRanges {
: body_range_(body_range) {}
SourceRange GetRange(SourceRangeKind kind) {
DCHECK(kind == SourceRangeKind::kBody);
DCHECK_EQ(kind, SourceRangeKind::kBody);
return body_range_;
}
@ -166,6 +183,27 @@ class JumpStatementSourceRanges final : public ContinuationSourceRanges {
: ContinuationSourceRanges(continuation_position) {}
};
class NaryOperationSourceRanges final : public AstNodeSourceRanges {
public:
NaryOperationSourceRanges(Zone* zone, const SourceRange& range)
: ranges_(zone) {
AddRange(range);
}
SourceRange GetRangeAtIndex(size_t index) {
DCHECK(index < ranges_.size());
return ranges_[index];
}
void AddRange(const SourceRange& range) { ranges_.push_back(range); }
size_t RangeCount() const { return ranges_.size(); }
SourceRange GetRange(SourceRangeKind kind) { UNREACHABLE(); }
private:
ZoneVector<SourceRange> ranges_;
};
class SuspendSourceRanges final : public ContinuationSourceRanges {
public:
explicit SuspendSourceRanges(int32_t continuation_position)

Some files were not shown because too many files have changed in this diff.