deps: update V8 to 7.1.302.28

PR-URL: https://github.com/nodejs/node/pull/23423
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Gus Caplan <me@gus.host>
Reviewed-By: Myles Borins <myles.borins@gmail.com>
Michaël Zasso 2018-12-04 08:20:37 +01:00
parent b8fbe69db1
commit 9b4bf7de6c
1526 changed files with 78560 additions and 41829 deletions

deps/v8/.clang-tidy vendored Normal file

@ -0,0 +1,20 @@
---
---
Checks: '-*,
modernize-redundant-void-arg,
modernize-replace-random-shuffle,
modernize-shrink-to-fit,
modernize-use-auto,
modernize-use-bool-literals,
modernize-use-equals-default,
modernize-use-equals-delete,
modernize-use-nullptr,
modernize-use-override,
google-build-explicit-make-pair,
google-explicit-constructor,
google-readability-casting'
WarningsAsErrors: ''
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
...
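
As a quick illustration (a hypothetical snippet, not from this diff), here is C++ code that a few of the checks enabled above would flag or rewrite:

// Hypothetical code that the .clang-tidy configuration above would flag.
struct Base {
  virtual ~Base() = default;
  virtual void Run() {}
};
struct Derived : Base {
  virtual void Run() {}  // modernize-use-override: should be 'void Run() override'
};
void Demo() {
  const char* p = 0;  // modernize-use-nullptr: 0 used as a pointer becomes nullptr
  bool flag = 1;      // modernize-use-bool-literals: 1 becomes 'true'
  (void)p;
  (void)flag;
}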

deps/v8/.gitattributes vendored

@ -3,3 +3,5 @@
# Do not modify line endings for binary files (which are sometimes auto
# detected as text files by git).
*.png binary
# Don't include minified JS in git grep/diff output
test/mjsunit/asm/sqlite3/*.js -diff

deps/v8/.gitignore vendored

@ -73,7 +73,6 @@
/tools/clang
/tools/gcmole/gcmole-tools
/tools/gcmole/gcmole-tools.tar.gz
/tools/gyp
/tools/jsfunfuzz/jsfunfuzz
/tools/jsfunfuzz/jsfunfuzz.tar.gz
/tools/luci-go

deps/v8/AUTHORS vendored

@ -132,6 +132,7 @@ Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Peter Wong <peter.wm.wong@gmail.com>
Paul Lind <plind44@gmail.com>
PhistucK <phistuck@gmail.com>
Qingyan Li <qingyan.liqy@alibaba-inc.com>
Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
Rafal Krypa <rafal@krypa.net>
@ -162,6 +163,7 @@ Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
Xiaoyin Liu <xiaoyin.l@outlook.com>
Yannic Bonenberger <contact@yannic-bonenberger.com>
Yong Wang <ccyongwang@tencent.com>
Yu Yin <xwafish@gmail.com>
Zac Hansen <xaxxon@gmail.com>

deps/v8/BUILD.gn vendored

@ -76,9 +76,6 @@ declare_args() {
v8_enable_embedded_builtins = v8_use_snapshot && v8_current_cpu != "x86" &&
!is_aix && (!is_win || is_clang)
# Enable embedded bytecode handlers.
v8_enable_embedded_bytecode_handlers = false
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
@ -164,6 +161,17 @@ declare_args() {
# setting the "check_v8_header_includes" gclient variable to run a
# specific hook).
v8_check_header_includes = false
# We reuse the snapshot toolchain for building torque and other generators to
# avoid building v8_libbase on the host more than once. On mips with big
# endian, the snapshot toolchain is the target toolchain and, hence, can't be
# used.
}
v8_generator_toolchain = v8_snapshot_toolchain
if (host_cpu == "x64" &&
(v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
v8_generator_toolchain = "//build/toolchain/linux:clang_x64"
}
# Derived defaults.
@ -197,9 +205,6 @@ assert(
!v8_untrusted_code_mitigations,
"Embedded builtins on ia32 and untrusted code mitigations are incompatible")
assert(!v8_enable_embedded_bytecode_handlers || v8_enable_embedded_builtins,
"Embedded bytecode handlers only work with embedded builtins")
# Specifies if the target build is a simulator build. Comparing target cpu
# with v8 target cpu to not affect simulator builds for making cross-compile
# snapshots.
@ -377,10 +382,10 @@ config("features") {
defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ]
}
if (v8_enable_embedded_builtins) {
defines += [ "V8_EMBEDDED_BUILTINS" ]
}
if (v8_enable_embedded_bytecode_handlers) {
defines += [ "V8_EMBEDDED_BYTECODE_HANDLERS" ]
defines += [
"V8_EMBEDDED_BUILTINS",
"V8_EMBEDDED_BYTECODE_HANDLERS",
]
}
if (v8_use_multi_snapshots) {
defines += [ "V8_MULTI_SNAPSHOTS" ]
@ -849,6 +854,8 @@ action("postmortem-metadata") {
sources = [
"src/objects.h",
"src/objects-inl.h",
"src/objects/allocation-site-inl.h",
"src/objects/allocation-site.h",
"src/objects/code-inl.h",
"src/objects/code.h",
"src/objects/data-handler.h",
@ -859,6 +866,8 @@ action("postmortem-metadata") {
"src/objects/js-array.h",
"src/objects/js-array-buffer-inl.h",
"src/objects/js-array-buffer.h",
"src/objects/js-objects-inl.h",
"src/objects/js-objects.h",
"src/objects/js-regexp-inl.h",
"src/objects/js-regexp.h",
"src/objects/js-regexp-string-iterator-inl.h",
@ -889,7 +898,10 @@ torque_files = [
"src/builtins/array.tq",
"src/builtins/array-copywithin.tq",
"src/builtins/array-foreach.tq",
"src/builtins/array-lastindexof.tq",
"src/builtins/array-reverse.tq",
"src/builtins/array-splice.tq",
"src/builtins/array-unshift.tq",
"src/builtins/typed-array.tq",
"src/builtins/data-view.tq",
"test/torque/test-torque.tq",
@ -911,17 +923,8 @@ action("run_torque") {
"test/cctest/:*",
]
# We reuse the snapshot toolchain for building torque to not build v8_libbase
# on the host more than once. On mips with big endian, the snapshot toolchain
# is the target toolchain and, hence, can't be used.
v8_torque_toolchain = v8_snapshot_toolchain
if (host_cpu == "x64" &&
(v8_current_cpu == "mips" || v8_current_cpu == "mips64")) {
v8_torque_toolchain = "//build/toolchain/linux:clang_x64"
}
deps = [
":torque($v8_torque_toolchain)",
":torque($v8_generator_toolchain)",
]
script = "tools/run.py"
@ -939,7 +942,7 @@ action("run_torque") {
}
args = [
"./" + rebase_path(get_label_info(":torque($v8_torque_toolchain)",
"./" + rebase_path(get_label_info(":torque($v8_generator_toolchain)",
"root_out_dir") + "/torque",
root_build_dir),
"-o",
@ -969,6 +972,7 @@ v8_source_set("torque_generated_initializers") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [
":generate_bytecode_builtins_list",
":run_torque",
]
@ -989,6 +993,24 @@ v8_source_set("torque_generated_initializers") {
configs = [ ":internal_config" ]
}
action("generate_bytecode_builtins_list") {
script = "tools/run.py"
outputs = [
"$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
]
deps = [
":bytecode_builtins_list_generator($v8_generator_toolchain)",
]
args = [
"./" + rebase_path(
get_label_info(
":bytecode_builtins_list_generator($v8_generator_toolchain)",
"root_out_dir") + "/bytecode_builtins_list_generator",
root_build_dir),
rebase_path("$target_gen_dir/builtins-generated/bytecodes-builtins-list.h"),
]
}
# Template to generate different V8 snapshots based on different runtime flags.
# Can be invoked with run_mksnapshot(<name>). The target will resolve to
# run_mksnapshot_<name>. If <name> is "default", no file suffixes will be used.
@ -1382,8 +1404,6 @@ v8_source_set("v8_initializers") {
"src/interpreter/interpreter-generator.h",
"src/interpreter/interpreter-intrinsics-generator.cc",
"src/interpreter/interpreter-intrinsics-generator.h",
"src/interpreter/setup-interpreter-internal.cc",
"src/interpreter/setup-interpreter.h",
]
if (use_jumbo_build == true) {
@ -1485,6 +1505,7 @@ v8_header_set("v8_headers") {
configs = [ ":internal_config" ]
sources = [
"include/v8-internal.h",
"include/v8.h",
"include/v8config.h",
]
@ -1504,8 +1525,10 @@ v8_source_set("v8_base") {
"//base/trace_event/common/trace_event_common.h",
### gcmole(all) ###
"$target_gen_dir/builtins-generated/bytecodes-builtins-list.h",
"include/v8-inspector-protocol.h",
"include/v8-inspector.h",
"include/v8-internal.h",
"include/v8-platform.h",
"include/v8-profiler.h",
"include/v8-testing.h",
@ -1516,6 +1539,7 @@ v8_source_set("v8_base") {
"src/accessors.h",
"src/address-map.cc",
"src/address-map.h",
"src/allocation-site-scopes-inl.h",
"src/allocation-site-scopes.h",
"src/allocation.cc",
"src/allocation.h",
@ -1562,6 +1586,7 @@ v8_source_set("v8_base") {
"src/ast/modules.h",
"src/ast/prettyprinter.cc",
"src/ast/prettyprinter.h",
"src/ast/scopes-inl.h",
"src/ast/scopes.cc",
"src/ast/scopes.h",
"src/ast/variables.cc",
@ -1599,7 +1624,6 @@ v8_source_set("v8_base") {
"src/builtins/builtins-internal.cc",
"src/builtins/builtins-interpreter.cc",
"src/builtins/builtins-intl.cc",
"src/builtins/builtins-intl.h",
"src/builtins/builtins-json.cc",
"src/builtins/builtins-math.cc",
"src/builtins/builtins-number.cc",
@ -1813,6 +1837,7 @@ v8_source_set("v8_base") {
"src/compiler/operator.h",
"src/compiler/osr.cc",
"src/compiler/osr.h",
"src/compiler/per-isolate-compiler-cache.h",
"src/compiler/persistent-map.h",
"src/compiler/pipeline-statistics.cc",
"src/compiler/pipeline-statistics.h",
@ -1824,6 +1849,8 @@ v8_source_set("v8_base") {
"src/compiler/raw-machine-assembler.h",
"src/compiler/redundancy-elimination.cc",
"src/compiler/redundancy-elimination.h",
"src/compiler/refs-map.cc",
"src/compiler/refs-map.h",
"src/compiler/register-allocator-verifier.cc",
"src/compiler/register-allocator-verifier.h",
"src/compiler/register-allocator.cc",
@ -2140,6 +2167,8 @@ v8_source_set("v8_base") {
"src/macro-assembler.h",
"src/map-updater.cc",
"src/map-updater.h",
"src/math-random.cc",
"src/math-random.h",
"src/maybe-handles-inl.h",
"src/maybe-handles.h",
"src/messages.cc",
@ -2158,6 +2187,7 @@ v8_source_set("v8_base") {
"src/objects/arguments.h",
"src/objects/bigint.cc",
"src/objects/bigint.h",
"src/objects/builtin-function-id.h",
"src/objects/code-inl.h",
"src/objects/code.h",
"src/objects/compilation-cache-inl.h",
@ -2181,11 +2211,17 @@ v8_source_set("v8_base") {
"src/objects/js-array-buffer.h",
"src/objects/js-array-inl.h",
"src/objects/js-array.h",
"src/objects/js-break-iterator-inl.h",
"src/objects/js-break-iterator.cc",
"src/objects/js-break-iterator.h",
"src/objects/js-collator-inl.h",
"src/objects/js-collator.cc",
"src/objects/js-collator.h",
"src/objects/js-collection-inl.h",
"src/objects/js-collection.h",
"src/objects/js-date-time-format-inl.h",
"src/objects/js-date-time-format.cc",
"src/objects/js-date-time-format.h",
"src/objects/js-generator-inl.h",
"src/objects/js-generator.h",
"src/objects/js-list-format-inl.h",
@ -2194,6 +2230,11 @@ v8_source_set("v8_base") {
"src/objects/js-locale-inl.h",
"src/objects/js-locale.cc",
"src/objects/js-locale.h",
"src/objects/js-number-format-inl.h",
"src/objects/js-number-format.cc",
"src/objects/js-number-format.h",
"src/objects/js-objects-inl.h",
"src/objects/js-objects.h",
"src/objects/js-plural-rules-inl.h",
"src/objects/js-plural-rules.cc",
"src/objects/js-plural-rules.h",
@ -2208,6 +2249,9 @@ v8_source_set("v8_base") {
"src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
"src/objects/js-relative-time-format.h",
"src/objects/js-segmenter-inl.h",
"src/objects/js-segmenter.cc",
"src/objects/js-segmenter.h",
"src/objects/literal-objects-inl.h",
"src/objects/literal-objects.cc",
"src/objects/literal-objects.h",
@ -2218,6 +2262,9 @@ v8_source_set("v8_base") {
"src/objects/maybe-object-inl.h",
"src/objects/maybe-object.h",
"src/objects/microtask-inl.h",
"src/objects/microtask-queue-inl.h",
"src/objects/microtask-queue.cc",
"src/objects/microtask-queue.h",
"src/objects/microtask.h",
"src/objects/module-inl.h",
"src/objects/module.cc",
@ -2231,6 +2278,8 @@ v8_source_set("v8_base") {
"src/objects/ordered-hash-table.h",
"src/objects/promise-inl.h",
"src/objects/promise.h",
"src/objects/property-array-inl.h",
"src/objects/property-array.h",
"src/objects/property-descriptor-object-inl.h",
"src/objects/property-descriptor-object.h",
"src/objects/prototype-info-inl.h",
@ -2242,6 +2291,8 @@ v8_source_set("v8_base") {
"src/objects/script.h",
"src/objects/shared-function-info-inl.h",
"src/objects/shared-function-info.h",
"src/objects/stack-frame-info-inl.h",
"src/objects/stack-frame-info.h",
"src/objects/string-inl.h",
"src/objects/string-table.h",
"src/objects/string.h",
@ -2267,6 +2318,7 @@ v8_source_set("v8_base") {
"src/parsing/parsing.cc",
"src/parsing/parsing.h",
"src/parsing/pattern-rewriter.cc",
"src/parsing/preparsed-scope-data-impl.h",
"src/parsing/preparsed-scope-data.cc",
"src/parsing/preparsed-scope-data.h",
"src/parsing/preparser-logger.h",
@ -2323,6 +2375,8 @@ v8_source_set("v8_base") {
"src/regexp/jsregexp-inl.h",
"src/regexp/jsregexp.cc",
"src/regexp/jsregexp.h",
"src/regexp/property-sequences.cc",
"src/regexp/property-sequences.h",
"src/regexp/regexp-ast.cc",
"src/regexp/regexp-ast.h",
"src/regexp/regexp-macro-assembler-irregexp-inl.h",
@ -2344,6 +2398,7 @@ v8_source_set("v8_base") {
"src/reloc-info.cc",
"src/reloc-info.h",
"src/roots-inl.h",
"src/roots.cc",
"src/roots.h",
"src/runtime-profiler.cc",
"src/runtime-profiler.h",
@ -2363,7 +2418,6 @@ v8_source_set("v8_base") {
"src/runtime/runtime-interpreter.cc",
"src/runtime/runtime-intl.cc",
"src/runtime/runtime-literals.cc",
"src/runtime/runtime-maths.cc",
"src/runtime/runtime-module.cc",
"src/runtime/runtime-numbers.cc",
"src/runtime/runtime-object.cc",
@ -2395,8 +2449,6 @@ v8_source_set("v8_base") {
"src/snapshot/builtin-serializer-allocator.h",
"src/snapshot/builtin-serializer.cc",
"src/snapshot/builtin-serializer.h",
"src/snapshot/builtin-snapshot-utils.cc",
"src/snapshot/builtin-snapshot-utils.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/default-deserializer-allocator.cc",
@ -2439,6 +2491,8 @@ v8_source_set("v8_base") {
"src/string-builder.cc",
"src/string-case.cc",
"src/string-case.h",
"src/string-constants.cc",
"src/string-constants.h",
"src/string-hasher-inl.h",
"src/string-hasher.h",
"src/string-search.h",
@ -2447,6 +2501,7 @@ v8_source_set("v8_base") {
"src/strtod.cc",
"src/strtod.h",
"src/third_party/utf8-decoder/utf8-decoder.h",
"src/torque-assembler.h",
"src/tracing/trace-event.cc",
"src/tracing/trace-event.h",
"src/tracing/traced-value.cc",
@ -2518,6 +2573,7 @@ v8_source_set("v8_base") {
"src/wasm/module-compiler.h",
"src/wasm/module-decoder.cc",
"src/wasm/module-decoder.h",
"src/wasm/object-access.h",
"src/wasm/signature-map.cc",
"src/wasm/signature-map.h",
"src/wasm/streaming-decoder.cc",
@ -2869,6 +2925,7 @@ v8_source_set("v8_base") {
defines = []
deps = [
":generate_bytecode_builtins_list",
":torque_generated_core",
":v8_headers",
":v8_libbase",
@ -2886,28 +2943,39 @@ v8_source_set("v8_base") {
} else {
sources -= [
"src/builtins/builtins-intl.cc",
"src/builtins/builtins-intl.h",
"src/char-predicates.cc",
"src/intl.cc",
"src/intl.h",
"src/objects/intl-objects-inl.h",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-break-iterator-inl.h",
"src/objects/js-break-iterator.cc",
"src/objects/js-break-iterator.h",
"src/objects/js-collator-inl.h",
"src/objects/js-collator.cc",
"src/objects/js-collator.h",
"src/objects/js-date-time-format-inl.h",
"src/objects/js-date-time-format.cc",
"src/objects/js-date-time-format.h",
"src/objects/js-list-format-inl.h",
"src/objects/js-list-format.cc",
"src/objects/js-list-format.h",
"src/objects/js-locale-inl.h",
"src/objects/js-locale.cc",
"src/objects/js-locale.h",
"src/objects/js-number-format-inl.h",
"src/objects/js-number-format.cc",
"src/objects/js-number-format.h",
"src/objects/js-plural-rules-inl.h",
"src/objects/js-plural-rules.cc",
"src/objects/js-plural-rules.h",
"src/objects/js-relative-time-format-inl.h",
"src/objects/js-relative-time-format.cc",
"src/objects/js-relative-time-format.h",
"src/objects/js-segmenter-inl.h",
"src/objects/js-segmenter.cc",
"src/objects/js-segmenter.h",
"src/runtime/runtime-intl.cc",
]
}
@ -2916,6 +2984,15 @@ v8_source_set("v8_base") {
sources += [ "$target_gen_dir/debug-support.cc" ]
deps += [ ":postmortem-metadata" ]
}
# Platforms that don't have CAS support need to link atomic library
# to implement atomic memory access
if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
libs = [ "atomic" ]
}
}
v8_source_set("torque_base") {
@ -2923,7 +3000,11 @@ v8_source_set("torque_base") {
sources = [
"src/torque/ast.h",
"src/torque/cfg.cc",
"src/torque/cfg.h",
"src/torque/contextual.h",
"src/torque/csa-generator.cc",
"src/torque/csa-generator.h",
"src/torque/declarable.cc",
"src/torque/declarable.h",
"src/torque/declaration-visitor.cc",
@ -2937,6 +3018,8 @@ v8_source_set("torque_base") {
"src/torque/global-context.h",
"src/torque/implementation-visitor.cc",
"src/torque/implementation-visitor.h",
"src/torque/instructions.cc",
"src/torque/instructions.h",
"src/torque/scope.cc",
"src/torque/scope.h",
"src/torque/source-positions.cc",
@ -2956,11 +3039,15 @@ v8_source_set("torque_base") {
]
configs = [ ":internal_config" ]
if (is_win && is_asan) {
remove_configs = [ "//build/config/sanitizers:default_sanitizer_flags" ]
}
}
v8_component("v8_libbase") {
sources = [
"src/base/adapters.h",
"src/base/address-region.h",
"src/base/atomic-utils.h",
"src/base/atomicops.h",
"src/base/atomicops_internals_atomicword_compat.h",
@ -2969,6 +3056,8 @@ v8_component("v8_libbase") {
"src/base/base-export.h",
"src/base/bits.cc",
"src/base/bits.h",
"src/base/bounded-page-allocator.cc",
"src/base/bounded-page-allocator.h",
"src/base/build_config.h",
"src/base/compiler-specific.h",
"src/base/cpu.cc",
@ -2994,6 +3083,8 @@ v8_component("v8_libbase") {
"src/base/list.h",
"src/base/logging.cc",
"src/base/logging.h",
"src/base/lsan-page-allocator.cc",
"src/base/lsan-page-allocator.h",
"src/base/macros.h",
"src/base/once.cc",
"src/base/once.h",
@ -3010,6 +3101,8 @@ v8_component("v8_libbase") {
"src/base/platform/semaphore.h",
"src/base/platform/time.cc",
"src/base/platform/time.h",
"src/base/region-allocator.cc",
"src/base/region-allocator.h",
"src/base/ring-buffer.h",
"src/base/safe_conversions.h",
"src/base/safe_conversions_impl.h",
@ -3237,6 +3330,29 @@ if (v8_monolithic) {
# Executables
#
if (current_toolchain == v8_generator_toolchain) {
v8_executable("bytecode_builtins_list_generator") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
include_dirs = [ "." ]
sources = [
"src/builtins/generate-bytecodes-builtins-list.cc",
"src/interpreter/bytecode-operands.cc",
"src/interpreter/bytecode-operands.h",
"src/interpreter/bytecodes.cc",
"src/interpreter/bytecodes.h",
]
configs = [ ":internal_config" ]
deps = [
":v8_libbase",
"//build/win:default_exe_manifest",
]
}
}
if (v8_use_snapshot && current_toolchain == v8_snapshot_toolchain) {
v8_executable("mksnapshot") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
@ -3272,6 +3388,9 @@ if (current_toolchain == v8_snapshot_toolchain) {
]
configs = [ ":internal_config" ]
if (is_win && is_asan) {
remove_configs = [ "//build/config/sanitizers:default_sanitizer_flags" ]
}
}
}
@ -3325,11 +3444,32 @@ group("v8_clusterfuzz") {
}
group("v8_archive") {
testonly = true
deps = [
":d8",
"test/cctest:cctest",
]
}
# TODO(dglazkov): Remove the "!build_with_chromium" condition once this clause
# is removed from Chromium.
if (is_fuchsia && !build_with_chromium) {
import("//build/config/fuchsia/rules.gni")
fuchsia_package("d8_fuchsia_pkg") {
testonly = true
binary = ":d8"
package_name_override = "d8"
}
fuchsia_package_runner("d8_fuchsia") {
testonly = true
package = ":d8_fuchsia_pkg"
package_name_override = "d8"
}
}
group("v8_fuzzers") {
testonly = true
data_deps = [
@ -3636,6 +3776,7 @@ v8_source_set("wasm_module_runner") {
]
deps = [
":generate_bytecode_builtins_list",
":torque_generated_core",
]
@ -3719,6 +3860,7 @@ v8_source_set("lib_wasm_fuzzer_common") {
]
deps = [
":generate_bytecode_builtins_list",
":torque_generated_core",
]

deps/v8/ChangeLog vendored

File diff suppressed because it is too large

deps/v8/DEPS vendored

@ -13,15 +13,13 @@ vars = {
deps = {
'v8/build':
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'dd6b994b32b498e9e766ce60c44da0aec3a2a188',
'v8/tools/gyp':
Var('chromium_url') + '/external/gyp.git' + '@' + 'd61a9397e668fa9843c4aa7da9e79460fe590bfb',
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'a7674eacc34947257c78fe6ba5cf0da17f60696c',
'v8/third_party/depot_tools':
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'aaf2cc09c6874e394c6c1e4692360cc400d6b388',
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '71e3be7a50c21faeee91ed99a8d5addfb7594e7c',
'v8/third_party/icu':
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'a191af9d025859e8368b8b469120d78006e9f5f6',
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'c52a2a250d6c5f5cbdd015dff36af7c5d0ae1150',
'v8/third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'd8cf40c4592dcec7fb01fcbdf1f6d4958b3fbf11',
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'a90cbf3b4216430a437991fb53ede8e048dea454',
'v8/buildtools':
Var('chromium_url') + '/chromium/buildtools.git' + '@' + '2dff9c9c74e9d732e6fe57c84ef7fd044cc45d96',
'v8/base/trace_event/common':
@ -35,7 +33,7 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/catapult': {
'url': Var('chromium_url') + '/catapult.git' + '@' + 'bc2c0a9307285fa36e03e7cdb6bf8623390ff855',
'url': Var('chromium_url') + '/catapult.git' + '@' + '9ec8468cfde0868ce5f3893e819087278c5af988',
'condition': 'checkout_android',
},
'v8/third_party/colorama/src': {
@ -43,17 +41,15 @@ deps = {
'condition': 'checkout_android',
},
'v8/third_party/fuchsia-sdk': {
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '3ec92c896bcbddc46e2a073ebfdd25aa1194656e',
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '6e1868c9083769d489d3fc25657339d50c22b1d8',
'condition': 'checkout_fuchsia',
},
'v8/third_party/googletest/src':
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'd5266326752f0a1dadbd310932d8f4fd8c3c5e7d',
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '2e68926a9d4929e9289373cd49e40ddcb9a628f7',
'v8/third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + 'b41863e42637544c2941b574c7877d3e1f663e25',
'v8/third_party/markupsafe':
Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + '8f45f5cfa0009d2a70589bcda0349b8cb2b72783',
'v8/third_party/proguard':
Var('chromium_url') + '/chromium/src/third_party/proguard.git' + '@' + 'a3729bea473bb5ffc5eaf289f5733bc5e2861c07',
'v8/tools/swarming_client':
Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '486c9b53c4d54dd4b95bb6ce0e31160e600dfc11',
'v8/test/benchmarks/data':
@ -61,25 +57,35 @@ deps = {
'v8/test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'v8/test/test262/data':
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'a6c1d05ac4fed084fa047e4c52ab2a8c9c2a8aef',
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '00cfe1628cc03164dcf03f01ba9c84376e9be735',
'v8/test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '0f2acdd882c84cff43b9d60df7574a1901e2cdcd',
'v8/third_party/qemu': {
'v8/third_party/qemu-linux-x64': {
'packages': [
{
'package': 'fuchsia/qemu/linux-amd64',
'version': '9cc486c5b18a0be515c39a280ca9a309c54cf994'
},
],
'condition': 'checkout_fuchsia',
'condition': 'host_os == "linux" and checkout_fuchsia',
'dep_type': 'cipd',
},
'v8/third_party/qemu-mac-x64': {
'packages': [
{
'package': 'fuchsia/qemu/mac-amd64',
'version': '2d3358ae9a569b2d4a474f498b32b202a152134f'
},
],
'condition': 'host_os == "mac" and checkout_fuchsia',
'dep_type': 'cipd',
},
'v8/tools/clang':
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'bb4146fb8a9dde405b71914657bb461dc93912ab',
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '7792d28b069af6dd3a86d1ba83b7f5c4ede605dc',
'v8/tools/luci-go':
Var('chromium_url') + '/chromium/src/tools/luci-go.git' + '@' + '445d7c4b6a4f10e188edb395b132e3996b127691',
'v8/test/wasm-js':
Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + '240ea673de6e75d78ae472f66127301ecab22a99',
Var('chromium_url') + '/external/github.com/WebAssembly/spec.git' + '@' + 'db9cd40808a90ecc5f4a23e88fb375c8f60b8d52',
}
recursedeps = [
@ -344,13 +350,6 @@ hooks = [
'condition': 'checkout_win',
'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
},
{
# Update the Mac toolchain if necessary.
'name': 'mac_toolchain',
'pattern': '.',
'condition': 'checkout_mac',
'action': ['python', 'v8/build/mac_toolchain.py'],
},
# Pull binutils for linux, enabled debug fission for faster linking /
# debugging when used with clang on Ubuntu Precise.
# https://code.google.com/p/chromium/issues/detail?id=352046
@ -387,6 +386,23 @@ hooks = [
'v8/build/fuchsia/update_sdk.py',
],
},
{
# Mac doesn't use lld so it's not included in the default clang bundle
# there. However, lld is needed in Fuchsia cross builds, so
# download it there.
# Should run after the clang hook.
'name': 'lld/mac',
'pattern': '.',
'condition': 'host_os == "mac" and checkout_fuchsia',
'action': ['python', 'v8/tools/clang/scripts/download_lld_mac.py'],
},
{
# Mac does not have llvm-objdump, download it for cross builds in Fuchsia.
'name': 'llvm-objdump',
'pattern': '.',
'condition': 'host_os == "mac" and checkout_fuchsia',
'action': ['python', 'v8/tools/clang/scripts/download_objdump.py'],
},
{
'name': 'mips_toolchain',
'pattern': '.',

deps/v8/PRESUBMIT.py vendored

@ -73,9 +73,10 @@ def _V8PresubmitChecks(input_api, output_api):
import sys
sys.path.append(input_api.os_path.join(
input_api.PresubmitLocalPath(), 'tools'))
from presubmit import CppLintProcessor
from presubmit import SourceProcessor
from presubmit import StatusFilesProcessor
from v8_presubmit import CppLintProcessor
from v8_presubmit import TorqueFormatProcessor
from v8_presubmit import SourceProcessor
from v8_presubmit import StatusFilesProcessor
def FilterFile(affected_file):
return input_api.FilterSourceFile(
@ -83,10 +84,19 @@ def _V8PresubmitChecks(input_api, output_api):
white_list=None,
black_list=_NO_LINT_PATHS)
def FilterTorqueFile(affected_file):
return input_api.FilterSourceFile(
affected_file,
white_list=(r'.+\.tq'))
results = []
if not CppLintProcessor().RunOnFiles(
input_api.AffectedFiles(file_filter=FilterFile, include_deletes=False)):
results.append(output_api.PresubmitError("C++ lint check failed"))
if not TorqueFormatProcessor().RunOnFiles(
input_api.AffectedFiles(file_filter=FilterTorqueFile,
include_deletes=False)):
results.append(output_api.PresubmitError("Torque format check failed"))
if not SourceProcessor().RunOnFiles(
input_api.AffectedFiles(include_deletes=False)):
results.append(output_api.PresubmitError(

deps/v8/gni/v8.gni vendored

@ -143,7 +143,15 @@ template("v8_source_set") {
}
}
target(link_target_type, target_name) {
forward_variables_from(invoker, "*", [ "configs" ])
forward_variables_from(invoker,
"*",
[
"configs",
"remove_configs",
])
if (defined(invoker.remove_configs)) {
configs -= invoker.remove_configs
}
configs += invoker.configs
configs -= v8_remove_configs
configs += v8_add_configs

deps/v8/include/libplatform/v8-tracing.h vendored

@ -35,7 +35,7 @@ class V8_PLATFORM_EXPORT TraceObject {
const char* as_string;
};
TraceObject() {}
TraceObject() = default;
~TraceObject();
void Initialize(
char phase, const uint8_t* category_enabled_flag, const char* name,
@ -106,8 +106,8 @@ class V8_PLATFORM_EXPORT TraceObject {
class V8_PLATFORM_EXPORT TraceWriter {
public:
TraceWriter() {}
virtual ~TraceWriter() {}
TraceWriter() = default;
virtual ~TraceWriter() = default;
virtual void AppendTraceEvent(TraceObject* trace_event) = 0;
virtual void Flush() = 0;
@ -147,8 +147,8 @@ class V8_PLATFORM_EXPORT TraceBufferChunk {
class V8_PLATFORM_EXPORT TraceBuffer {
public:
TraceBuffer() {}
virtual ~TraceBuffer() {}
TraceBuffer() = default;
virtual ~TraceBuffer() = default;
virtual TraceObject* AddTraceEvent(uint64_t* handle) = 0;
virtual TraceObject* GetEventByHandle(uint64_t handle) = 0;

deps/v8/include/v8-inspector.h vendored

@ -62,7 +62,7 @@ class V8_EXPORT StringView {
class V8_EXPORT StringBuffer {
public:
virtual ~StringBuffer() {}
virtual ~StringBuffer() = default;
virtual const StringView& string() = 0;
// This method copies contents.
static std::unique_ptr<StringBuffer> create(const StringView&);
@ -107,7 +107,7 @@ class V8_EXPORT V8StackTrace {
virtual StringView topScriptId() const = 0;
virtual StringView topFunctionName() const = 0;
virtual ~V8StackTrace() {}
virtual ~V8StackTrace() = default;
virtual std::unique_ptr<protocol::Runtime::API::StackTrace>
buildInspectorObject() const = 0;
virtual std::unique_ptr<StringBuffer> toString() const = 0;
@ -118,13 +118,13 @@ class V8_EXPORT V8StackTrace {
class V8_EXPORT V8InspectorSession {
public:
virtual ~V8InspectorSession() {}
virtual ~V8InspectorSession() = default;
// Cross-context inspectable values (DOM nodes in different worlds, etc.).
class V8_EXPORT Inspectable {
public:
virtual v8::Local<v8::Value> get(v8::Local<v8::Context>) = 0;
virtual ~Inspectable() {}
virtual ~Inspectable() = default;
};
virtual void addInspectedObject(std::unique_ptr<Inspectable>) = 0;
@ -162,7 +162,7 @@ class V8_EXPORT V8InspectorSession {
class V8_EXPORT V8InspectorClient {
public:
virtual ~V8InspectorClient() {}
virtual ~V8InspectorClient() = default;
virtual void runMessageLoopOnPause(int contextGroupId) {}
virtual void quitMessageLoopOnPause() {}
@ -239,7 +239,7 @@ struct V8_EXPORT V8StackTraceId {
class V8_EXPORT V8Inspector {
public:
static std::unique_ptr<V8Inspector> create(v8::Isolate*, V8InspectorClient*);
virtual ~V8Inspector() {}
virtual ~V8Inspector() = default;
// Contexts instrumentation.
virtual void contextCreated(const V8ContextInfo&) = 0;
@ -277,7 +277,7 @@ class V8_EXPORT V8Inspector {
// Connection.
class V8_EXPORT Channel {
public:
virtual ~Channel() {}
virtual ~Channel() = default;
virtual void sendResponse(int callId,
std::unique_ptr<StringBuffer> message) = 0;
virtual void sendNotification(std::unique_ptr<StringBuffer> message) = 0;

deps/v8/include/v8-internal.h vendored Normal file

@ -0,0 +1,316 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef INCLUDE_V8_INTERNAL_H_
#define INCLUDE_V8_INTERNAL_H_
#include <stddef.h>
#include <stdint.h>
#include <type_traits>
#include "v8-version.h" // NOLINT(build/include)
#include "v8config.h" // NOLINT(build/include)
namespace v8 {
class Context;
class Data;
class Isolate;
namespace internal {
class Object;
/**
* Configuration of tagging scheme.
*/
const int kApiPointerSize = sizeof(void*); // NOLINT
const int kApiDoubleSize = sizeof(double); // NOLINT
const int kApiIntSize = sizeof(int); // NOLINT
const int kApiInt64Size = sizeof(int64_t); // NOLINT
// Tag information for HeapObject.
const int kHeapObjectTag = 1;
const int kWeakHeapObjectTag = 3;
const int kHeapObjectTagSize = 2;
const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
// Tag information for Smi.
const int kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
template <size_t tagged_ptr_size>
struct SmiTagging;
template <int kSmiShiftSize>
V8_INLINE internal::Object* IntToSmi(int value) {
int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
intptr_t tagged_value =
(static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
return reinterpret_cast<internal::Object*>(tagged_value);
}
// Smi constants for systems where tagged pointer is a 32-bit value.
template <>
struct SmiTagging<4> {
enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
static int SmiShiftSize() { return kSmiShiftSize; }
static int SmiValueSize() { return kSmiValueSize; }
V8_INLINE static int SmiToInt(const internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Throw away top 32 bits and shift down (requires >> to be sign extending).
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
}
V8_INLINE static internal::Object* IntToSmi(int value) {
return internal::IntToSmi<kSmiShiftSize>(value);
}
V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
// To be representable as a tagged small integer, the two
// most-significant bits of 'value' must be either 00 or 11 due to
// sign-extension. To check this we add 01 to the two
// most-significant bits, and check if the most-significant bit is 0
//
// CAUTION: The original code below:
// bool result = ((value + 0x40000000) & 0x80000000) == 0;
// may lead to incorrect results according to the C language spec, and
// in fact doesn't work correctly with gcc4.1.1 in some cases: The
// compiler may produce undefined results in case of signed integer
// overflow. The computation must be done w/ unsigned ints.
return static_cast<uintptr_t>(value) + 0x40000000U < 0x80000000U;
}
};
// Smi constants for systems where tagged pointer is a 64-bit value.
template <>
struct SmiTagging<8> {
enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
static int SmiShiftSize() { return kSmiShiftSize; }
static int SmiValueSize() { return kSmiValueSize; }
V8_INLINE static int SmiToInt(const internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
}
V8_INLINE static internal::Object* IntToSmi(int value) {
return internal::IntToSmi<kSmiShiftSize>(value);
}
V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
// To be representable as a long smi, the value must be a 32-bit integer.
return (value == static_cast<int32_t>(value));
}
};
#if V8_COMPRESS_POINTERS
static_assert(
kApiPointerSize == kApiInt64Size,
"Pointer compression can be enabled only for 64-bit architectures");
typedef SmiTagging<4> PlatformSmiTagging;
#else
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
#endif
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
const int kSmiMinValue = (static_cast<unsigned int>(-1)) << (kSmiValueSize - 1);
const int kSmiMaxValue = -(kSmiMinValue + 1);
constexpr bool SmiValuesAre31Bits() { return kSmiValueSize == 31; }
constexpr bool SmiValuesAre32Bits() { return kSmiValueSize == 32; }
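
A standalone sketch of the 32-bit scheme defined by SmiTagging<4> above, using only the constants shown (kSmiTag == 0, kSmiTagSize == 1, kSmiShiftSize == 0, kHeapObjectTag == 1); the program is illustrative, not part of the header:

#include <cassert>
#include <cstdint>

int main() {
  const int kShift = 1;  // kSmiTagSize + kSmiShiftSize on 32-bit targets
  int value = -42;
  // IntToSmi: shift the payload up one bit; the low tag bit is kSmiTag (0).
  // Mirrors the header's IntToSmi and, like it, relies on two's-complement
  // shift behavior (see the CAUTION comment above for the valid-range check).
  intptr_t tagged = (static_cast<intptr_t>(value) << kShift) | 0;
  assert((tagged & 1) == 0);  // tag bit 0 = Smi; heap objects carry tag 1
  // SmiToInt: truncate to 32 bits, then use a sign-extending right shift.
  int decoded = static_cast<int32_t>(tagged) >> kShift;
  assert(decoded == value);
  // IsValidSmi: the unsigned-add trick accepts exactly the 31-bit range.
  auto valid = [](intptr_t v) {
    return static_cast<uintptr_t>(v) + 0x40000000U < 0x80000000U;
  };
  assert(valid(0x3FFFFFFF) && !valid(0x40000000) && valid(-0x40000000));
  return 0;
}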
/**
* This class exports constants and functionality from within v8 that
* is necessary to implement inline functions in the v8 api. Don't
* depend on functions and constants defined here.
*/
class Internals {
public:
// These values match non-compiler-dependent values defined within
// the implementation of v8.
static const int kHeapObjectMapOffset = 0;
static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
static const int kStringResourceOffset =
1 * kApiPointerSize + 2 * kApiIntSize;
static const int kOddballKindOffset = 4 * kApiPointerSize + kApiDoubleSize;
static const int kForeignAddressOffset = kApiPointerSize;
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
static const int kContextHeaderSize = 2 * kApiPointerSize;
static const int kContextEmbedderDataIndex = 5;
static const int kFullStringRepresentationMask = 0x0f;
static const int kStringEncodingMask = 0x8;
static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kExternalOneByteRepresentationTag = 0x0a;
static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
static const int kExternalMemoryOffset = 4 * kApiPointerSize;
static const int kExternalMemoryLimitOffset =
kExternalMemoryOffset + kApiInt64Size;
static const int kExternalMemoryAtLastMarkCompactOffset =
kExternalMemoryLimitOffset + kApiInt64Size;
static const int kIsolateRootsOffset = kExternalMemoryLimitOffset +
kApiInt64Size + kApiInt64Size +
kApiPointerSize + kApiPointerSize;
static const int kUndefinedValueRootIndex = 4;
static const int kTheHoleValueRootIndex = 5;
static const int kNullValueRootIndex = 6;
static const int kTrueValueRootIndex = 7;
static const int kFalseValueRootIndex = 8;
static const int kEmptyStringRootIndex = 9;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
static const int kNodeStateMask = 0x7;
static const int kNodeStateIsWeakValue = 2;
static const int kNodeStateIsPendingValue = 3;
static const int kNodeStateIsNearDeathValue = 4;
static const int kNodeIsIndependentShift = 3;
static const int kNodeIsActiveShift = 4;
static const int kFirstNonstringType = 0x80;
static const int kOddballType = 0x83;
static const int kForeignType = 0x87;
static const int kJSSpecialApiObjectType = 0x410;
static const int kJSApiObjectType = 0x420;
static const int kJSObjectType = 0x421;
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
static const uint32_t kNumIsolateDataSlots = 4;
// Soft limit for AdjustAmountofExternalAllocatedMemory. Trigger an
// incremental GC once the external memory reaches this limit.
static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024;
V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate);
V8_INLINE static void CheckInitialized(v8::Isolate* isolate) {
#ifdef V8_ENABLE_CHECKS
CheckInitializedImpl(isolate);
#endif
}
V8_INLINE static bool HasHeapObjectTag(const internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
}
V8_INLINE static int SmiValue(const internal::Object* value) {
return PlatformSmiTagging::SmiToInt(value);
}
V8_INLINE static internal::Object* IntToSmi(int value) {
return PlatformSmiTagging::IntToSmi(value);
}
V8_INLINE static constexpr bool IsValidSmi(intptr_t value) {
return PlatformSmiTagging::IsValidSmi(value);
}
V8_INLINE static int GetInstanceType(const internal::Object* obj) {
typedef internal::Object O;
O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
return ReadField<uint16_t>(map, kMapInstanceTypeOffset);
}
V8_INLINE static int GetOddballKind(const internal::Object* obj) {
typedef internal::Object O;
return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
}
V8_INLINE static bool IsExternalTwoByteString(int instance_type) {
int representation = (instance_type & kFullStringRepresentationMask);
return representation == kExternalTwoByteRepresentationTag;
}
V8_INLINE static uint8_t GetNodeFlag(internal::Object** obj, int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & static_cast<uint8_t>(1U << shift);
}
V8_INLINE static void UpdateNodeFlag(internal::Object** obj, bool value,
int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
uint8_t mask = static_cast<uint8_t>(1U << shift);
*addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
}
V8_INLINE static uint8_t GetNodeState(internal::Object** obj) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
return *addr & kNodeStateMask;
}
V8_INLINE static void UpdateNodeState(internal::Object** obj, uint8_t value) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
*addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
}
V8_INLINE static void SetEmbedderData(v8::Isolate* isolate, uint32_t slot,
void* data) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset + slot * kApiPointerSize;
*reinterpret_cast<void**>(addr) = data;
}
V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
uint32_t slot) {
const uint8_t* addr = reinterpret_cast<const uint8_t*>(isolate) +
kIsolateEmbedderDataOffset + slot * kApiPointerSize;
return *reinterpret_cast<void* const*>(addr);
}
V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate, int index) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
}
template <typename T>
V8_INLINE static T ReadField(const internal::Object* ptr, int offset) {
const uint8_t* addr =
reinterpret_cast<const uint8_t*>(ptr) + offset - kHeapObjectTag;
return *reinterpret_cast<const T*>(addr);
}
template <typename T>
V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) {
typedef internal::Object O;
typedef internal::Internals I;
O* ctx = *reinterpret_cast<O* const*>(context);
int embedder_data_offset =
I::kContextHeaderSize +
(internal::kApiPointerSize * I::kContextEmbedderDataIndex);
O* embedder_data = I::ReadField<O*>(ctx, embedder_data_offset);
int value_offset =
I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index);
return I::ReadField<T>(embedder_data, value_offset);
}
};
// Only perform cast check for types derived from v8::Data since
// other types do not implement the Cast method.
template <bool PerformCheck>
struct CastCheck {
template <class T>
static void Perform(T* data);
};
template <>
template <class T>
void CastCheck<true>::Perform(T* data) {
T::Cast(data);
}
template <>
template <class T>
void CastCheck<false>::Perform(T* data) {}
template <class T>
V8_INLINE void PerformCastCheck(T* data) {
CastCheck<std::is_base_of<Data, T>::value>::Perform(data);
}
} // namespace internal
} // namespace v8
#endif // INCLUDE_V8_INTERNAL_H_

deps/v8/include/v8-platform.h vendored

@ -322,7 +322,9 @@ class Platform {
* |isolate|. Tasks posted for the same isolate should be executed in order of
* scheduling. The definition of "foreground" is opaque to V8.
*/
virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
V8_DEPRECATE_SOON(
"Use a taskrunner acquired by GetForegroundTaskRunner instead.",
virtual void CallOnForegroundThread(Isolate* isolate, Task* task)) = 0;
/**
* Schedules a task to be invoked on a foreground thread wrt a specific
@ -330,8 +332,10 @@ class Platform {
* Tasks posted for the same isolate should be executed in order of
* scheduling. The definition of "foreground" is opaque to V8.
*/
virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
double delay_in_seconds) = 0;
V8_DEPRECATE_SOON(
"Use a taskrunner acquired by GetForegroundTaskRunner instead.",
virtual void CallDelayedOnForegroundThread(Isolate* isolate, Task* task,
double delay_in_seconds)) = 0;
/**
* Schedules a task to be invoked on a foreground thread wrt a specific
@ -341,7 +345,10 @@ class Platform {
* starved for an arbitrarily long time if no idle time is available.
* The definition of "foreground" is opaque to V8.
*/
virtual void CallIdleOnForegroundThread(Isolate* isolate, IdleTask* task) {
V8_DEPRECATE_SOON(
"Use a taskrunner acquired by GetForegroundTaskRunner instead.",
virtual void CallIdleOnForegroundThread(Isolate* isolate,
IdleTask* task)) {
// This must be overridden if |IdleTasksEnabled()|.
abort();
}
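
For reference, a sketch of the replacement that these deprecation messages point to, assuming an embedder holding a v8::Platform* and v8::Isolate*; the helper name is made up:

#include <memory>
#include <utility>
#include <v8-platform.h>

// Post through the foreground TaskRunner instead of the deprecated
// CallOnForegroundThread / CallDelayedOnForegroundThread entry points.
void PostToForeground(v8::Platform* platform, v8::Isolate* isolate,
                      std::unique_ptr<v8::Task> task,
                      std::unique_ptr<v8::Task> delayed_task) {
  std::shared_ptr<v8::TaskRunner> runner =
      platform->GetForegroundTaskRunner(isolate);
  runner->PostTask(std::move(task));                      // ordered per isolate
  runner->PostDelayedTask(std::move(delayed_task), 2.0);  // delay in seconds
}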

deps/v8/include/v8-profiler.h vendored

@ -341,12 +341,6 @@ class V8_EXPORT CpuProfiler {
V8_DEPRECATED("Use Isolate::SetIdle(bool) instead.",
void SetIdle(bool is_idle));
/**
* Generate more detailed source positions to code objects. This results in
* better results when mapping profiling samples to script source.
*/
static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);
private:
CpuProfiler();
~CpuProfiler();
@ -451,7 +445,7 @@ class V8_EXPORT OutputStream { // NOLINT
kContinue = 0,
kAbort = 1
};
virtual ~OutputStream() {}
virtual ~OutputStream() = default;
/** Notify about the end of stream. */
virtual void EndOfStream() = 0;
/** Get preferred output chunk size. Called only once. */
@ -545,7 +539,7 @@ class V8_EXPORT ActivityControl { // NOLINT
kContinue = 0,
kAbort = 1
};
virtual ~ActivityControl() {}
virtual ~ActivityControl() = default;
/**
* Notify about current progress. The activity can be stopped by
* returning kAbort as the callback result.
@ -631,7 +625,7 @@ class V8_EXPORT AllocationProfile {
*/
virtual Node* GetRootNode() = 0;
virtual ~AllocationProfile() {}
virtual ~AllocationProfile() = default;
static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
@ -799,15 +793,15 @@ class V8_EXPORT HeapProfiler {
virtual const char* GetName(Local<Object> object) = 0;
protected:
virtual ~ObjectNameResolver() {}
virtual ~ObjectNameResolver() = default;
};
/**
* Takes a heap snapshot and returns it.
*/
const HeapSnapshot* TakeHeapSnapshot(
ActivityControl* control = NULL,
ObjectNameResolver* global_object_name_resolver = NULL);
ActivityControl* control = nullptr,
ObjectNameResolver* global_object_name_resolver = nullptr);
/**
* Starts tracking of heap objects population statistics. After calling
@ -834,7 +828,7 @@ class V8_EXPORT HeapProfiler {
* method.
*/
SnapshotObjectId GetHeapStats(OutputStream* stream,
int64_t* timestamp_us = NULL);
int64_t* timestamp_us = nullptr);
/**
* Stops tracking of heap objects population statistics, cleans up all
@ -991,8 +985,8 @@ class V8_EXPORT RetainedObjectInfo { // NOLINT
virtual intptr_t GetSizeInBytes() { return -1; }
protected:
RetainedObjectInfo() {}
virtual ~RetainedObjectInfo() {}
RetainedObjectInfo() = default;
virtual ~RetainedObjectInfo() = default;
private:
RetainedObjectInfo(const RetainedObjectInfo&);

deps/v8/include/v8-util.h vendored

@ -94,11 +94,11 @@ class DefaultPersistentValueMapTraits : public StdMapTraits<K, V> {
static WeakCallbackDataType* WeakCallbackParameter(
MapType* map, const K& key, Local<V> value) {
return NULL;
return nullptr;
}
static MapType* MapFromWeakCallbackInfo(
const WeakCallbackInfo<WeakCallbackDataType>& data) {
return NULL;
return nullptr;
}
static K KeyFromWeakCallbackInfo(
const WeakCallbackInfo<WeakCallbackDataType>& data) {
@ -302,7 +302,7 @@ class PersistentValueMapBase {
static PersistentContainerValue ClearAndLeak(Global<V>* persistent) {
V* v = persistent->val_;
persistent->val_ = 0;
persistent->val_ = nullptr;
return reinterpret_cast<PersistentContainerValue>(v);
}
@ -633,7 +633,7 @@ class PersistentValueVector {
private:
static PersistentContainerValue ClearAndLeak(Global<V>* persistent) {
V* v = persistent->val_;
persistent->val_ = 0;
persistent->val_ = nullptr;
return reinterpret_cast<PersistentContainerValue>(v);
}

deps/v8/include/v8-version.h vendored

@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 7
#define V8_MINOR_VERSION 0
#define V8_BUILD_NUMBER 276
#define V8_PATCH_LEVEL 38
#define V8_MINOR_VERSION 1
#define V8_BUILD_NUMBER 302
#define V8_PATCH_LEVEL 28
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)

deps/v8/include/v8.h vendored

File diff suppressed because it is too large

deps/v8/include/v8config.h vendored

@ -420,6 +420,36 @@ namespace v8 { template <typename T> class AlignOfHelper { char c; T t; }; }
#define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
#ifdef V8_OS_WIN
// Setup for Windows DLL export/import. When building the V8 DLL the
// BUILDING_V8_SHARED needs to be defined. When building a program which uses
// the V8 DLL USING_V8_SHARED needs to be defined. When either building the V8
// static library or building a program which uses the V8 static library neither
// BUILDING_V8_SHARED nor USING_V8_SHARED should be defined.
#ifdef BUILDING_V8_SHARED
# define V8_EXPORT __declspec(dllexport)
#elif USING_V8_SHARED
# define V8_EXPORT __declspec(dllimport)
#else
# define V8_EXPORT
#endif // BUILDING_V8_SHARED
#else // V8_OS_WIN
// Setup for Linux shared library export.
#if V8_HAS_ATTRIBUTE_VISIBILITY
# ifdef BUILDING_V8_SHARED
# define V8_EXPORT __attribute__ ((visibility("default")))
# else
# define V8_EXPORT
# endif
#else
# define V8_EXPORT
#endif
#endif // V8_OS_WIN
// clang-format on
#endif // V8CONFIG_H_
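
The same export/import pattern in miniature, with hypothetical MYLIB_* names, for readers unfamiliar with the Windows DLL convention the comment above describes:

// Self-contained sketch of the export-macro pattern (names are made up).
#if defined(_WIN32)
# if defined(BUILDING_MYLIB_SHARED)
#  define MYLIB_EXPORT __declspec(dllexport)   // compiling the DLL itself
# elif defined(USING_MYLIB_SHARED)
#  define MYLIB_EXPORT __declspec(dllimport)   // linking against the DLL
# else
#  define MYLIB_EXPORT                         // static library: no decoration
# endif
#else
# define MYLIB_EXPORT __attribute__((visibility("default")))
#endif

MYLIB_EXPORT void MyLibInit();  // exported when building, imported when using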

deps/v8/infra/config/cq.cfg vendored

@ -108,6 +108,23 @@ verifiers {
triggered_by: "v8_win_rel_ng"
}
}
# TODO(machenbach): Remove after testing in practice and migrate to
# PRESUBMIT.py scripts.
buckets {
name: "luci.chromium.try"
builders {
name: "cast_shell_android"
experiment_percentage: 20
}
builders {
name: "cast_shell_linux"
experiment_percentage: 20
}
builders {
name: "linux-chromeos-rel"
experiment_percentage: 20
}
}
}
}

deps/v8/infra/mb/mb_config.pyl vendored

@ -27,24 +27,12 @@
'mips64el.debug': 'default_debug_mips64el',
'mips64el.optdebug': 'default_optdebug_mips64el',
'mips64el.release': 'default_release_mips64el',
'ppc.debug': 'default_debug_ppc',
'ppc.optdebug': 'default_optdebug_ppc',
'ppc.release': 'default_release_ppc',
'ppc.debug.sim': 'default_debug_ppc_sim',
'ppc.optdebug.sim': 'default_optdebug_ppc_sim',
'ppc.release.sim': 'default_release_ppc_sim',
'ppc64.debug': 'default_debug_ppc64',
'ppc64.optdebug': 'default_optdebug_ppc64',
'ppc64.release': 'default_release_ppc64',
'ppc64.debug.sim': 'default_debug_ppc64_sim',
'ppc64.optdebug.sim': 'default_optdebug_ppc64_sim',
'ppc64.release.sim': 'default_release_ppc64_sim',
's390.debug': 'default_debug_s390',
's390.optdebug': 'default_optdebug_s390',
's390.release': 'default_release_s390',
's390.debug.sim': 'default_debug_s390_sim',
's390.optdebug.sim': 'default_optdebug_s390_sim',
's390.release.sim': 'default_release_s390_sim',
's390x.debug': 'default_debug_s390x',
's390x.optdebug': 'default_optdebug_s390x',
's390x.release': 'default_release_s390x',
@ -54,6 +42,7 @@
'x64.debug': 'default_debug_x64',
'x64.optdebug': 'default_optdebug_x64',
'x64.release': 'default_release_x64',
'x64.release.sample': 'release_x64_sample',
},
'client.dynamorio': {
'linux-v8-dr': 'release_x64',
@ -102,6 +91,8 @@
'V8 Linux gcc 4.8': 'release_x86_gcc',
'V8 Linux64 gcc 4.8 - debug': 'debug_x64_gcc',
# FYI.
'V8 Linux - embedded builtins': 'release_x86_embedded_builtins',
'V8 Linux - embedded builtins - debug': 'debug_x86_embedded_builtins',
'V8 Fuchsia': 'release_x64_fuchsia',
'V8 Fuchsia - debug': 'debug_x64_fuchsia',
'V8 Linux64 - cfi': 'release_x64_cfi',
@ -138,8 +129,6 @@
'debug_simulate_arm64_asan_edge',
'V8 Clusterfuzz Linux ASAN arm - debug builder':
'debug_simulate_arm_asan_edge',
'V8 Clusterfuzz Linux ASAN mipsel - debug builder':
'debug_simulate_mipsel_asan_edge',
'V8 Clusterfuzz Linux64 CFI - release builder':
'release_x64_cfi_clusterfuzz',
'V8 Clusterfuzz Linux MSAN no origins':
@ -169,11 +158,8 @@
'V8 Mips - builder': 'release_mips_no_snap_no_i18n',
'V8 Linux - mipsel - sim - builder': 'release_simulate_mipsel',
'V8 Linux - mips64el - sim - builder': 'release_simulate_mips64el',
# PPC.
'V8 Linux - ppc - sim': 'release_simulate_ppc',
# IBM.
'V8 Linux - ppc64 - sim': 'release_simulate_ppc64',
# S390.
'V8 Linux - s390 - sim': 'release_simulate_s390',
'V8 Linux - s390x - sim': 'release_simulate_s390x',
},
'client.v8.branches': {
@ -193,12 +179,8 @@
'V8 mips64el - sim - stable branch': 'release_simulate_mips64el',
'V8 mipsel - sim - beta branch': 'release_simulate_mipsel',
'V8 mipsel - sim - stable branch': 'release_simulate_mipsel',
'V8 ppc - sim - beta branch': 'release_simulate_ppc',
'V8 ppc - sim - stable branch': 'release_simulate_ppc',
'V8 ppc64 - sim - beta branch': 'release_simulate_ppc64',
'V8 ppc64 - sim - stable branch': 'release_simulate_ppc64',
'V8 s390 - sim - beta branch': 'release_simulate_s390',
'V8 s390 - sim - stable branch': 'release_simulate_s390',
'V8 s390x - sim - beta branch': 'release_simulate_s390x',
'V8 s390x - sim - stable branch': 'release_simulate_s390x',
},
@ -207,7 +189,9 @@
'v8_android_arm64_compile_dbg': 'debug_android_arm64',
'v8_android_arm64_n5x_rel_ng': 'release_android_arm64',
'v8_fuchsia_rel_ng': 'release_x64_fuchsia_trybot',
'v8_linux_embedded_builtins_rel_ng': 'release_x86_embedded_builtins_trybot',
'v8_linux_rel_ng': 'release_x86_gcmole_trybot',
'v8_linux_optional_rel_ng': 'release_x86_trybot',
'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa',
'v8_linux_nodcheck_rel_ng': 'release_x86_minimal_symbols',
'v8_linux_dbg_ng': 'debug_x86_trybot',
@ -218,6 +202,7 @@
'v8_linux_gcc_compile_rel': 'release_x86_gcc_minimal_symbols',
'v8_linux_gcc_rel': 'release_x86_gcc_minimal_symbols',
'v8_linux_shared_compile_rel': 'release_x86_shared_verify_heap',
'v8_linux64_compile_rel_xg': 'release_x64_test_features_trybot',
'v8_linux64_dbg_ng': 'debug_x64_trybot',
'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc',
'v8_linux64_header_includes_dbg': 'debug_x64_header_includes',
@ -235,6 +220,7 @@
'v8_linux64_tsan_isolates_rel_ng':
'release_x64_tsan_minimal_symbols',
'v8_linux64_ubsan_rel_ng': 'release_x64_ubsan_vptr_minimal_symbols',
'v8_odroid_arm_rel_ng': 'release_arm',
# TODO(machenbach): Remove after switching to x64 on infra side.
'v8_win_dbg': 'debug_x86_trybot',
'v8_win_compile_dbg': 'debug_x86_trybot',
@ -280,7 +266,7 @@
'default_optdebug_android_arm': [
'debug', 'arm', 'android', 'v8_enable_slow_dchecks' ],
'default_release_android_arm': [
'release', 'arm', 'android'],
'release', 'arm', 'android', 'android_strip_outputs'],
'default_debug_arm64': [
'debug', 'simulate_arm64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_arm64': [
@ -299,18 +285,6 @@
'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks'],
'default_release_mips64el': [
'release', 'simulate_mips64el'],
'default_debug_ppc': [
'debug', 'ppc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc': [
'debug', 'ppc', 'v8_enable_slow_dchecks'],
'default_release_ppc': [
'release', 'ppc'],
'default_debug_ppc_sim': [
'debug', 'simulate_ppc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc_sim': [
'debug', 'simulate_ppc', 'v8_enable_slow_dchecks'],
'default_release_ppc_sim': [
'release', 'simulate_ppc'],
'default_debug_ppc64': [
'debug', 'ppc64', 'gcc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc64': [
@ -323,18 +297,6 @@
'debug', 'simulate_ppc64', 'v8_enable_slow_dchecks'],
'default_release_ppc64_sim': [
'release', 'simulate_ppc64'],
'default_debug_s390': [
'debug', 's390', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_s390': [
'debug', 's390', 'v8_enable_slow_dchecks'],
'default_release_s390': [
'release', 's390'],
'default_debug_s390_sim': [
'debug', 'simulate_s390', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_s390_sim': [
'debug', 'simulate_s390', 'v8_enable_slow_dchecks'],
'default_release_s390_sim': [
'release', 'simulate_s390'],
'default_debug_s390x': [
'debug', 's390x', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_s390x': [
@ -353,6 +315,8 @@
'debug', 'x64', 'v8_enable_slow_dchecks'],
'default_release_x64': [
'release', 'x64'],
'release_x64_sample': [
'release', 'x64', 'sample'],
'default_debug_x86': [
'debug', 'x86', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_x86': [
@ -373,8 +337,6 @@
'debug_simulate_arm64_no_snap': [
'debug', 'simulate_arm64', 'shared', 'goma', 'v8_optimized_debug',
'v8_snapshot_none'],
'debug_simulate_mipsel_asan_edge': [
'debug_bot', 'simulate_mipsel', 'asan', 'edge'],
# Release configs for simulators.
'release_simulate_arm': [
@ -397,12 +359,8 @@
'release_bot', 'simulate_mipsel'],
'release_simulate_mips64el': [
'release_bot', 'simulate_mips64el'],
'release_simulate_ppc': [
'release_bot', 'simulate_ppc'],
'release_simulate_ppc64': [
'release_bot', 'simulate_ppc64'],
'release_simulate_s390': [
'release_bot', 'simulate_s390'],
'release_simulate_s390x': [
'release_bot', 'simulate_s390x'],
@ -416,9 +374,11 @@
'release_arm': [
'release_bot', 'arm', 'hard_float'],
'release_android_arm': [
'release_bot', 'arm', 'android', 'minimal_symbols'],
'release_bot', 'arm', 'android', 'minimal_symbols',
'android_strip_outputs'],
'release_android_arm64': [
'release_bot', 'arm64', 'android', 'minimal_symbols'],
'release_bot', 'arm64', 'android', 'minimal_symbols',
'android_strip_outputs'],
# Release configs for x64.
'release_x64': [
@ -519,6 +479,9 @@
# Debug configs for x86.
'debug_x86': [
'debug_bot', 'x86'],
'debug_x86_embedded_builtins': [
'debug_bot', 'x86', 'v8_enable_embedded_builtins',
'v8_no_untrusted_code_mitigations'],
'debug_x86_minimal_symbols': [
'debug_bot', 'x86', 'minimal_symbols'],
'debug_x86_no_i18n': [
@ -538,6 +501,12 @@
# Release configs for x86.
'release_x86': [
'release_bot', 'x86'],
'release_x86_embedded_builtins': [
'release_bot', 'x86', 'v8_enable_embedded_builtins',
'v8_no_untrusted_code_mitigations'],
'release_x86_embedded_builtins_trybot': [
'release_trybot', 'x86', 'v8_enable_embedded_builtins',
'v8_no_untrusted_code_mitigations'],
'release_x86_gcc': [
'release_bot', 'x86', 'gcc'],
'release_x86_gcc_minimal_symbols': [
@ -580,6 +549,10 @@
'gn_args': 'target_os="android" v8_android_log_stdout=true',
},
'android_strip_outputs': {
'gn_args': 'android_unstripped_runtime_outputs=false',
},
'arm': {
'gn_args': 'target_cpu="arm"',
},
@ -625,14 +598,10 @@
'gn_args': 'is_debug=true v8_enable_backtrace=true',
},
'v8_use_multi_snapshots': {
'gn_args': 'v8_use_multi_snapshots=true',
},
'debug_bot': {
'mixins': [
'debug', 'shared', 'goma', 'v8_enable_slow_dchecks',
'v8_use_multi_snapshots', 'v8_optimized_debug'],
'v8_optimized_debug'],
},
'debug_trybot': {
@ -715,11 +684,11 @@
},
'release': {
'gn_args': 'is_debug=false android_unstripped_runtime_outputs=false',
'gn_args': 'is_debug=false',
},
'release_bot': {
'mixins': ['release', 'static', 'goma', 'v8_use_multi_snapshots'],
'mixins': ['release', 'static', 'goma'],
},
'release_trybot': {
@ -747,18 +716,10 @@
'gn_args': 'target_cpu="x64" v8_target_cpu="mips64el"',
},
'simulate_ppc': {
'gn_args': 'target_cpu="x86" v8_target_cpu="ppc"',
},
'simulate_ppc64': {
'gn_args': 'target_cpu="x64" v8_target_cpu="ppc64"',
},
'simulate_s390': {
'gn_args': 'target_cpu="x86" v8_target_cpu="s390"',
},
'simulate_s390x': {
'gn_args': 'target_cpu="x64" v8_target_cpu="s390x"',
},
@ -808,6 +769,10 @@
'gn_args': 'v8_correctness_fuzzer=true v8_multi_arch_build=true',
},
'v8_enable_embedded_builtins': {
'gn_args': 'v8_enable_embedded_builtins=true',
},
'v8_enable_slow_dchecks': {
'gn_args': 'v8_enable_slow_dchecks=true',
},
@ -853,6 +818,10 @@
'gn_args': 'v8_use_snapshot=false',
},
'v8_no_untrusted_code_mitigations': {
'gn_args': 'v8_untrusted_code_mitigations=false',
},
'v8_verify_heap': {
'gn_args': 'v8_enable_verify_heap=true',
},
@ -861,18 +830,10 @@
'gn_args': 'v8_enable_verify_csa=true',
},
's390': {
'gn_args': 'target_cpu="s390x" v8_target_cpu="s390"',
},
's390x': {
'gn_args': 'target_cpu="s390x" v8_target_cpu="s390x"',
},
'ppc': {
'gn_args': 'target_cpu="ppc"',
},
'ppc64': {
'gn_args': 'target_cpu="ppc64" use_custom_libcxx=false',
},
@ -885,5 +846,9 @@
'gn_args': 'target_cpu="x86"',
},
'sample': {
'gn_args': 'v8_monolithic=true is_component_build=false '
'v8_use_external_startup_data=false use_custom_libcxx=false',
},
},
}
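The two new mixins ('v8_enable_embedded_builtins', 'v8_no_untrusted_code_mitigations') only contribute gn args; a named config is just its mixins' args concatenated in order. As a rough sketch, 'release_x86_embedded_builtins' should therefore resolve to something like the following, plus whatever the 'static' and 'goma' mixins (defined outside this hunk) contribute:

    is_debug=false target_cpu="x86"
    v8_enable_embedded_builtins=true v8_untrusted_code_mitigations=false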

deps/v8/src/DEPS vendored

@ -13,6 +13,7 @@ include_rules = [
"+src/heap/heap.h",
"+src/heap/heap-inl.h",
"+src/heap/heap-write-barrier-inl.h",
"+src/heap/heap-write-barrier.h",
"-src/inspector",
"-src/interpreter",
"+src/interpreter/bytecode-array-accessor.h",
@ -30,6 +31,7 @@ include_rules = [
"+testing/gtest/include/gtest/gtest_prod.h",
"-src/libplatform",
"-include/libplatform",
"+builtins-generated",
"+torque-generated"
]

deps/v8/src/accessors.cc vendored

@ -31,7 +31,8 @@ Handle<AccessorInfo> Accessors::MakeAccessor(
info->set_is_special_data_property(true);
info->set_is_sloppy(false);
info->set_replace_on_access(false);
info->set_has_no_side_effect(false);
info->set_getter_side_effect_type(SideEffectType::kHasSideEffect);
info->set_setter_side_effect_type(SideEffectType::kHasSideEffect);
name = factory->InternalizeName(name);
info->set_name(*name);
Handle<Object> get = v8::FromCData(isolate, getter);
@ -70,7 +71,7 @@ bool Accessors::IsJSObjectFieldAccessor(Isolate* isolate, Handle<Map> map,
default:
if (map->instance_type() < FIRST_NONSTRING_TYPE) {
return CheckForName(isolate, name, isolate->factory()->length_string(),
String::kLengthOffset, FieldIndex::kTagged, index);
String::kLengthOffset, FieldIndex::kWord32, index);
}
return false;
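The single has_no_side_effect bit on AccessorInfo is replaced by independent getter and setter classifications. A minimal sketch of the pattern MakeAccessor now sets up (defaults per the hunk above; kHasNoSideEffect and kHasSideEffectToReceiver are the other SideEffectType values used in accessors.h below):

    // Conservative default: assume both callbacks have arbitrary side effects.
    info->set_getter_side_effect_type(SideEffectType::kHasSideEffect);
    info->set_setter_side_effect_type(SideEffectType::kHasSideEffect);
    // Accessors that only mutate their receiver can later be relaxed to
    // SideEffectType::kHasSideEffectToReceiver, and pure ones to
    // SideEffectType::kHasNoSideEffect.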

deps/v8/src/accessors.h vendored

@ -22,27 +22,28 @@ class JavaScriptFrame;
// The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names.
#define ACCESSOR_INFO_LIST(V) \
V(arguments_iterator, ArgumentsIterator) \
V(array_length, ArrayLength) \
V(bound_function_length, BoundFunctionLength) \
V(bound_function_name, BoundFunctionName) \
V(error_stack, ErrorStack) \
V(function_arguments, FunctionArguments) \
V(function_caller, FunctionCaller) \
V(function_name, FunctionName) \
V(function_length, FunctionLength) \
V(function_prototype, FunctionPrototype) \
V(string_length, StringLength)
#define SIDE_EFFECT_FREE_ACCESSOR_INFO_LIST(V) \
V(ArrayLength) \
V(BoundFunctionLength) \
V(BoundFunctionName) \
V(FunctionName) \
V(FunctionLength) \
V(FunctionPrototype) \
V(StringLength)
// V(accessor_name, AccessorName, GetterSideEffectType, SetterSideEffectType)
#define ACCESSOR_INFO_LIST_GENERATOR(V, _) \
V(_, arguments_iterator, ArgumentsIterator, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
V(_, array_length, ArrayLength, kHasNoSideEffect, kHasSideEffectToReceiver) \
V(_, bound_function_length, BoundFunctionLength, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
V(_, bound_function_name, BoundFunctionName, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
V(_, error_stack, ErrorStack, kHasSideEffectToReceiver, \
kHasSideEffectToReceiver) \
V(_, function_arguments, FunctionArguments, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
V(_, function_caller, FunctionCaller, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
V(_, function_name, FunctionName, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
V(_, function_length, FunctionLength, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
V(_, function_prototype, FunctionPrototype, kHasNoSideEffect, \
kHasSideEffectToReceiver) \
V(_, string_length, StringLength, kHasNoSideEffect, kHasSideEffectToReceiver)
#define ACCESSOR_SETTER_LIST(V) \
V(ArrayLengthSetter) \
@ -55,11 +56,11 @@ class JavaScriptFrame;
class Accessors : public AllStatic {
public:
#define ACCESSOR_GETTER_DECLARATION(accessor_name, AccessorName) \
static void AccessorName##Getter( \
v8::Local<v8::Name> name, \
#define ACCESSOR_GETTER_DECLARATION(_, accessor_name, AccessorName, ...) \
static void AccessorName##Getter( \
v8::Local<v8::Name> name, \
const v8::PropertyCallbackInfo<v8::Value>& info);
ACCESSOR_INFO_LIST(ACCESSOR_GETTER_DECLARATION)
ACCESSOR_INFO_LIST_GENERATOR(ACCESSOR_GETTER_DECLARATION, /* not used */)
#undef ACCESSOR_GETTER_DECLARATION
#define ACCESSOR_SETTER_DECLARATION(accessor_name) \
@ -71,7 +72,7 @@ class Accessors : public AllStatic {
static constexpr int kAccessorInfoCount =
#define COUNT_ACCESSOR(...) +1
ACCESSOR_INFO_LIST(COUNT_ACCESSOR);
ACCESSOR_INFO_LIST_GENERATOR(COUNT_ACCESSOR, /* not used */);
#undef COUNT_ACCESSOR
static constexpr int kAccessorSetterCount =
@ -118,9 +119,9 @@ class Accessors : public AllStatic {
AccessorNameBooleanSetterCallback setter);
private:
#define ACCESSOR_INFO_DECLARATION(accessor_name, AccessorName) \
#define ACCESSOR_INFO_DECLARATION(_, accessor_name, AccessorName, ...) \
static Handle<AccessorInfo> Make##AccessorName##Info(Isolate* isolate);
ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
ACCESSOR_INFO_LIST_GENERATOR(ACCESSOR_INFO_DECLARATION, /* not used */)
#undef ACCESSOR_INFO_DECLARATION
friend class Heap;
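Each row of this second-order macro now carries both name forms plus the getter/setter side-effect types, and each consumer picks the columns it needs via the trailing '...'. For example, the array_length row fed through ACCESSOR_GETTER_DECLARATION expands to roughly:

    static void ArrayLengthGetter(
        v8::Local<v8::Name> name,
        const v8::PropertyCallbackInfo<v8::Value>& info);

The leading '_' slot lets a caller thread an extra macro argument through without defining a separate list variant.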

deps/v8/src/address-map.cc vendored

@ -14,8 +14,8 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
map_ = isolate->root_index_map();
if (map_ != nullptr) return;
map_ = new HeapObjectToIndexHashMap();
for (uint32_t i = 0; i < Heap::kStrongRootListLength; i++) {
Heap::RootListIndex root_index = static_cast<Heap::RootListIndex>(i);
for (RootIndex root_index = RootIndex::kFirstStrongRoot;
root_index <= RootIndex::kLastStrongRoot; ++root_index) {
Object* root = isolate->heap()->root(root_index);
if (!root->IsHeapObject()) continue;
// Omit root entries that can be written after initialization. They must
@ -25,11 +25,12 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
if (isolate->heap()->RootCanBeTreatedAsConstant(root_index)) {
HeapObject* heap_object = HeapObject::cast(root);
Maybe<uint32_t> maybe_index = map_->Get(heap_object);
uint32_t index = static_cast<uint32_t>(root_index);
if (maybe_index.IsJust()) {
// Some are initialized to a previous value in the root list.
DCHECK_LT(maybe_index.FromJust(), i);
DCHECK_LT(maybe_index.FromJust(), index);
} else {
map_->Set(heap_object, i);
map_->Set(heap_object, index);
}
} else {
// Immortal immovable root objects are constant and allocated on the first

deps/v8/src/address-map.h vendored

@ -56,11 +56,14 @@ class RootIndexMap {
public:
explicit RootIndexMap(Isolate* isolate);
static const int kInvalidRootIndex = -1;
int Lookup(HeapObject* obj) {
// Returns true on successful lookup and sets *|out_root_list|.
bool Lookup(HeapObject* obj, RootIndex* out_root_list) {
Maybe<uint32_t> maybe_index = map_->Get(obj);
return maybe_index.IsJust() ? maybe_index.FromJust() : kInvalidRootIndex;
if (maybe_index.IsJust()) {
*out_root_list = static_cast<RootIndex>(maybe_index.FromJust());
return true;
}
return false;
}
private:

View File

@ -0,0 +1,52 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ALLOCATION_SITE_SCOPES_INL_H_
#define V8_ALLOCATION_SITE_SCOPES_INL_H_
#include "src/allocation-site-scopes.h"
#include "src/objects/allocation-site-inl.h"
namespace v8 {
namespace internal {
Handle<AllocationSite> AllocationSiteUsageContext::EnterNewScope() {
if (top().is_null()) {
InitializeTraversal(top_site_);
} else {
// Advance current site
Object* nested_site = current()->nested_site();
// Something is wrong if we advance to the end of the list here.
update_current_site(AllocationSite::cast(nested_site));
}
return Handle<AllocationSite>(*current(), isolate());
}
void AllocationSiteUsageContext::ExitScope(Handle<AllocationSite> scope_site,
Handle<JSObject> object) {
// This assert ensures that we are pointing at the right sub-object in a
// recursive walk of a nested literal.
DCHECK(object.is_null() || *object == scope_site->boilerplate());
}
bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject> object) {
if (activated_ && AllocationSite::CanTrack(object->map()->instance_type())) {
if (FLAG_allocation_site_pretenuring ||
AllocationSite::ShouldTrack(object->GetElementsKind())) {
if (FLAG_trace_creation_allocation_sites) {
PrintF("*** Creating Memento for %s %p\n",
object->IsJSArray() ? "JSArray" : "JSObject",
static_cast<void*>(*object));
}
return true;
}
}
return false;
}
} // namespace internal
} // namespace v8
#endif // V8_ALLOCATION_SITE_SCOPES_INL_H_
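This new header follows V8's -inl.h convention: declarations stay in allocation-site-scopes.h, while bodies that need heavier includes (here src/objects/allocation-site-inl.h) move to the -inl.h that only calling .cc files include. Schematically, for a hypothetical class Foo:

    // foo.h:     class Foo { inline void Bar(); };   // cheap to include
    // foo-inl.h: #include "foo.h"
    //            inline void Foo::Bar() { ... }      // may pull in other -inl.h
    // user.cc:   #include "foo-inl.h"                // only callers of Bar() pay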

deps/v8/src/allocation-site-scopes.h vendored

@ -56,40 +56,12 @@ class AllocationSiteUsageContext : public AllocationSiteContext {
top_site_(site),
activated_(activated) { }
inline Handle<AllocationSite> EnterNewScope() {
if (top().is_null()) {
InitializeTraversal(top_site_);
} else {
// Advance current site
Object* nested_site = current()->nested_site();
// Something is wrong if we advance to the end of the list here.
update_current_site(AllocationSite::cast(nested_site));
}
return Handle<AllocationSite>(*current(), isolate());
}
inline Handle<AllocationSite> EnterNewScope();
inline void ExitScope(Handle<AllocationSite> scope_site,
Handle<JSObject> object) {
// This assert ensures that we are pointing at the right sub-object in a
// recursive walk of a nested literal.
DCHECK(object.is_null() || *object == scope_site->boilerplate());
}
Handle<JSObject> object);
bool ShouldCreateMemento(Handle<JSObject> object) {
if (activated_ &&
AllocationSite::CanTrack(object->map()->instance_type())) {
if (FLAG_allocation_site_pretenuring ||
AllocationSite::ShouldTrack(object->GetElementsKind())) {
if (FLAG_trace_creation_allocation_sites) {
PrintF("*** Creating Memento for %s %p\n",
object->IsJSArray() ? "JSArray" : "JSObject",
static_cast<void*>(*object));
}
return true;
}
}
return false;
}
inline bool ShouldCreateMemento(Handle<JSObject> object);
static const bool kCopying = true;

deps/v8/src/allocation.cc vendored

@ -8,6 +8,7 @@
#include "src/base/bits.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
#include "src/base/lsan-page-allocator.h"
#include "src/base/page-allocator.h"
#include "src/base/platform/platform.h"
#include "src/utils.h"
@ -17,10 +18,6 @@
#include <malloc.h> // NOLINT
#endif
#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#endif
namespace v8 {
namespace internal {
@ -51,21 +48,29 @@ struct InitializePageAllocator {
static v8::base::PageAllocator default_allocator;
page_allocator = &default_allocator;
}
#if defined(LEAK_SANITIZER)
{
static v8::base::LsanPageAllocator lsan_allocator(page_allocator);
page_allocator = &lsan_allocator;
}
#endif
*page_allocator_ptr = page_allocator;
}
};
static base::LazyInstance<v8::PageAllocator*, InitializePageAllocator>::type
page_allocator = LAZY_INSTANCE_INITIALIZER;
v8::PageAllocator* GetPageAllocator() { return page_allocator.Get(); }
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
const int kAllocationTries = 2;
} // namespace
v8::PageAllocator* GetPlatformPageAllocator() {
DCHECK_NOT_NULL(page_allocator.Get());
return page_allocator.Get();
}
void* Malloced::New(size_t size) {
void* result = AllocWithRetry(size);
if (result == nullptr) {
@ -131,68 +136,62 @@ void AlignedFree(void *ptr) {
#endif
}
size_t AllocatePageSize() { return GetPageAllocator()->AllocatePageSize(); }
size_t AllocatePageSize() {
return GetPlatformPageAllocator()->AllocatePageSize();
}
size_t CommitPageSize() { return GetPageAllocator()->CommitPageSize(); }
size_t CommitPageSize() { return GetPlatformPageAllocator()->CommitPageSize(); }
void SetRandomMmapSeed(int64_t seed) {
GetPageAllocator()->SetRandomMmapSeed(seed);
GetPlatformPageAllocator()->SetRandomMmapSeed(seed);
}
void* GetRandomMmapAddr() { return GetPageAllocator()->GetRandomMmapAddr(); }
void* GetRandomMmapAddr() {
return GetPlatformPageAllocator()->GetRandomMmapAddr();
}
void* AllocatePages(void* address, size_t size, size_t alignment,
void* AllocatePages(v8::PageAllocator* page_allocator, void* address,
size_t size, size_t alignment,
PageAllocator::Permission access) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(address, AlignedAddress(address, alignment));
DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
void* result = nullptr;
for (int i = 0; i < kAllocationTries; ++i) {
result =
GetPageAllocator()->AllocatePages(address, size, alignment, access);
result = page_allocator->AllocatePages(address, size, alignment, access);
if (result != nullptr) break;
size_t request_size = size + alignment - AllocatePageSize();
size_t request_size = size + alignment - page_allocator->AllocatePageSize();
if (!OnCriticalMemoryPressure(request_size)) break;
}
#if defined(LEAK_SANITIZER)
if (result != nullptr) {
__lsan_register_root_region(result, size);
}
#endif
return result;
}
bool FreePages(void* address, const size_t size) {
DCHECK_EQ(0UL, size & (GetPageAllocator()->AllocatePageSize() - 1));
bool result = GetPageAllocator()->FreePages(address, size);
#if defined(LEAK_SANITIZER)
if (result) {
__lsan_unregister_root_region(address, size);
}
#endif
return result;
bool FreePages(v8::PageAllocator* page_allocator, void* address,
const size_t size) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_EQ(0UL, size & (page_allocator->AllocatePageSize() - 1));
return page_allocator->FreePages(address, size);
}
bool ReleasePages(void* address, size_t size, size_t new_size) {
bool ReleasePages(v8::PageAllocator* page_allocator, void* address, size_t size,
size_t new_size) {
DCHECK_NOT_NULL(page_allocator);
DCHECK_LT(new_size, size);
bool result = GetPageAllocator()->ReleasePages(address, size, new_size);
#if defined(LEAK_SANITIZER)
if (result) {
__lsan_unregister_root_region(address, size);
__lsan_register_root_region(address, new_size);
}
#endif
return result;
return page_allocator->ReleasePages(address, size, new_size);
}
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) {
return GetPageAllocator()->SetPermissions(address, size, access);
bool SetPermissions(v8::PageAllocator* page_allocator, void* address,
size_t size, PageAllocator::Permission access) {
DCHECK_NOT_NULL(page_allocator);
return page_allocator->SetPermissions(address, size, access);
}
byte* AllocatePage(void* address, size_t* allocated) {
size_t page_size = AllocatePageSize();
void* result =
AllocatePages(address, page_size, page_size, PageAllocator::kReadWrite);
byte* AllocatePage(v8::PageAllocator* page_allocator, void* address,
size_t* allocated) {
DCHECK_NOT_NULL(page_allocator);
size_t page_size = page_allocator->AllocatePageSize();
void* result = AllocatePages(page_allocator, address, page_size, page_size,
PageAllocator::kReadWrite);
if (result != nullptr) *allocated = page_size;
return static_cast<byte*>(result);
}
@ -206,16 +205,17 @@ bool OnCriticalMemoryPressure(size_t length) {
return true;
}
VirtualMemory::VirtualMemory() : address_(kNullAddress), size_(0) {}
VirtualMemory::VirtualMemory(size_t size, void* hint, size_t alignment)
: address_(kNullAddress), size_(0) {
size_t page_size = AllocatePageSize();
size_t alloc_size = RoundUp(size, page_size);
address_ = reinterpret_cast<Address>(
AllocatePages(hint, alloc_size, alignment, PageAllocator::kNoAccess));
if (address_ != kNullAddress) {
size_ = alloc_size;
VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
void* hint, size_t alignment)
: page_allocator_(page_allocator) {
DCHECK_NOT_NULL(page_allocator);
size_t page_size = page_allocator_->AllocatePageSize();
alignment = RoundUp(alignment, page_size);
size = RoundUp(size, page_size);
Address address = reinterpret_cast<Address>(AllocatePages(
page_allocator_, hint, size, alignment, PageAllocator::kNoAccess));
if (address != kNullAddress) {
region_ = base::AddressRegion(address, size);
}
}
@ -226,30 +226,31 @@ VirtualMemory::~VirtualMemory() {
}
void VirtualMemory::Reset() {
address_ = kNullAddress;
size_ = 0;
page_allocator_ = nullptr;
region_ = base::AddressRegion();
}
bool VirtualMemory::SetPermissions(Address address, size_t size,
PageAllocator::Permission access) {
CHECK(InVM(address, size));
bool result = v8::internal::SetPermissions(address, size, access);
bool result =
v8::internal::SetPermissions(page_allocator_, address, size, access);
DCHECK(result);
return result;
}
size_t VirtualMemory::Release(Address free_start) {
DCHECK(IsReserved());
DCHECK(IsAddressAligned(free_start, CommitPageSize()));
DCHECK(IsAddressAligned(free_start, page_allocator_->CommitPageSize()));
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
const size_t free_size = size_ - (free_start - address_);
const size_t old_size = region_.size();
const size_t free_size = old_size - (free_start - region_.begin());
CHECK(InVM(free_start, free_size));
DCHECK_LT(address_, free_start);
DCHECK_LT(free_start, address_ + size_);
CHECK(ReleasePages(reinterpret_cast<void*>(address_), size_,
size_ - free_size));
size_ -= free_size;
region_.set_size(old_size - free_size);
CHECK(ReleasePages(page_allocator_, reinterpret_cast<void*>(region_.begin()),
old_size, region_.size()));
return free_size;
}
@ -257,41 +258,21 @@ void VirtualMemory::Free() {
DCHECK(IsReserved());
// Notice: Order is important here. The VirtualMemory object might live
// inside the allocated region.
Address address = address_;
size_t size = size_;
CHECK(InVM(address, size));
v8::PageAllocator* page_allocator = page_allocator_;
base::AddressRegion region = region_;
Reset();
// FreePages expects size to be aligned to allocation granularity. Trimming
// may leave size at only commit granularity. Align it here.
CHECK(FreePages(reinterpret_cast<void*>(address),
RoundUp(size, AllocatePageSize())));
// FreePages expects size to be aligned to allocation granularity; however,
// ReleasePages may leave size at only commit granularity. Align it here.
CHECK(FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
RoundUp(region.size(), page_allocator->AllocatePageSize())));
}
void VirtualMemory::TakeControl(VirtualMemory* from) {
DCHECK(!IsReserved());
address_ = from->address_;
size_ = from->size_;
page_allocator_ = from->page_allocator_;
region_ = from->region_;
from->Reset();
}
bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result) {
VirtualMemory vm(size, hint);
if (vm.IsReserved()) {
result->TakeControl(&vm);
return true;
}
return false;
}
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result) {
VirtualMemory vm(size, hint, alignment);
if (vm.IsReserved()) {
result->TakeControl(&vm);
return true;
}
return false;
}
} // namespace internal
} // namespace v8
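With the allocator an explicit parameter, these helpers work against any v8::PageAllocator, and LSAN bookkeeping lives behind the LsanPageAllocator wrapper instead of per-call-site #ifdefs. A minimal round-trip using the platform allocator, per the signatures above:

    v8::PageAllocator* allocator = GetPlatformPageAllocator();
    size_t page_size = allocator->AllocatePageSize();
    void* mem = AllocatePages(allocator, nullptr, page_size, page_size,
                              PageAllocator::kReadWrite);
    if (mem != nullptr) {
      CHECK(SetPermissions(allocator, mem, page_size,
                           PageAllocator::kNoAccess));
      CHECK(FreePages(allocator, mem, page_size));
    }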

deps/v8/src/allocation.h vendored

@ -6,6 +6,7 @@
#define V8_ALLOCATION_H_
#include "include/v8-platform.h"
#include "src/base/address-region.h"
#include "src/base/compiler-specific.h"
#include "src/base/platform/platform.h"
#include "src/globals.h"
@ -82,6 +83,9 @@ void* AllocWithRetry(size_t size);
void* AlignedAlloc(size_t size, size_t alignment);
void AlignedFree(void *ptr);
// Returns the platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
// Gets the page granularity for AllocatePages and FreePages. Addresses returned
// by AllocatePages and AllocatePage are aligned to this size.
V8_EXPORT_PRIVATE size_t AllocatePageSize();
@ -101,14 +105,16 @@ V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
// AllocatePageSize(). Returns the address of the allocated memory, with the
// specified size and alignment, or nullptr on failure.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT void* AllocatePages(void* address, size_t size,
V8_WARN_UNUSED_RESULT void* AllocatePages(v8::PageAllocator* page_allocator,
void* address, size_t size,
size_t alignment,
PageAllocator::Permission access);
// Frees memory allocated by a call to AllocatePages. |address| and |size| must
// be multiples of AllocatePageSize(). Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);
V8_WARN_UNUSED_RESULT bool FreePages(v8::PageAllocator* page_allocator,
void* address, const size_t size);
// Releases memory that is no longer needed. The range specified by |address|
// and |size| must be an allocated memory region. |size| and |new_size| must be
@ -116,7 +122,8 @@ V8_WARN_UNUSED_RESULT bool FreePages(void* address, const size_t size);
// Released memory is left in an undefined state, so it should not be accessed.
// Returns true on success, otherwise false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
V8_WARN_UNUSED_RESULT bool ReleasePages(v8::PageAllocator* page_allocator,
void* address, size_t size,
size_t new_size);
// Sets permissions according to |access|. |address| and |size| must be
@ -124,18 +131,21 @@ V8_WARN_UNUSED_RESULT bool ReleasePages(void* address, size_t size,
// cause the memory contents to be lost. Returns true on success, otherwise
// false.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT bool SetPermissions(void* address, size_t size,
V8_WARN_UNUSED_RESULT bool SetPermissions(v8::PageAllocator* page_allocator,
void* address, size_t size,
PageAllocator::Permission access);
inline bool SetPermissions(Address address, size_t size,
PageAllocator::Permission access) {
return SetPermissions(reinterpret_cast<void*>(address), size, access);
inline bool SetPermissions(v8::PageAllocator* page_allocator, Address address,
size_t size, PageAllocator::Permission access) {
return SetPermissions(page_allocator, reinterpret_cast<void*>(address), size,
access);
}
// Convenience function that allocates a single system page with read and write
// permissions. |address| is a hint. Returns the base address of the memory and
// the page size via |allocated| on success. Returns nullptr on failure.
V8_EXPORT_PRIVATE
V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
V8_WARN_UNUSED_RESULT byte* AllocatePage(v8::PageAllocator* page_allocator,
void* address, size_t* allocated);
// Function that may release reserved memory regions to allow failed allocations
// to succeed. |length| is the amount of memory needed. Returns |true| if memory
@ -143,50 +153,67 @@ V8_WARN_UNUSED_RESULT byte* AllocatePage(void* address, size_t* allocated);
V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
// Represents and controls an area of reserved memory.
class V8_EXPORT_PRIVATE VirtualMemory {
class V8_EXPORT_PRIVATE VirtualMemory final {
public:
// Empty VirtualMemory object, controlling no reserved memory.
VirtualMemory();
VirtualMemory() = default;
// Reserves virtual memory containing an area of the given size that is
// aligned per alignment. This may not be at the position returned by
// address().
VirtualMemory(size_t size, void* hint, size_t alignment = AllocatePageSize());
// aligned per |alignment| rounded up to the |page_allocator|'s allocate page
// size.
// This may not be at the position returned by address().
VirtualMemory(v8::PageAllocator* page_allocator, size_t size, void* hint,
size_t alignment = 1);
// Construct a virtual memory by assigning it some already mapped address
// and size.
VirtualMemory(Address address, size_t size)
: address_(address), size_(size) {}
VirtualMemory(v8::PageAllocator* page_allocator, Address address, size_t size)
: page_allocator_(page_allocator), region_(address, size) {
DCHECK_NOT_NULL(page_allocator);
}
// Releases the reserved memory, if any, controlled by this VirtualMemory
// object.
~VirtualMemory();
// Move constructor.
VirtualMemory(VirtualMemory&& other) V8_NOEXCEPT { TakeControl(&other); }
// Move assignment operator.
VirtualMemory& operator=(VirtualMemory&& other) V8_NOEXCEPT {
TakeControl(&other);
return *this;
}
// Returns whether the memory has been reserved.
bool IsReserved() const { return address_ != kNullAddress; }
bool IsReserved() const { return region_.begin() != kNullAddress; }
// Initializes or resets an embedded VirtualMemory object.
void Reset();
v8::PageAllocator* page_allocator() { return page_allocator_; }
const base::AddressRegion& region() const { return region_; }
// Returns the start address of the reserved memory.
// If the memory was reserved with an alignment, this address is not
// necessarily aligned. The user might need to round it up to a multiple of
// the alignment to get the start of the aligned block.
Address address() const {
DCHECK(IsReserved());
return address_;
return region_.begin();
}
Address end() const {
DCHECK(IsReserved());
return address_ + size_;
return region_.end();
}
// Returns the size of the reserved memory. The returned value is only
// meaningful when IsReserved() returns true.
// If the memory was reserved with an alignment, this size may be larger
// than the requested size.
size_t size() const { return size_; }
size_t size() const { return region_.size(); }
// Sets permissions according to the access argument. address and size must be
// multiples of CommitPageSize(). Returns true on success, otherwise false.
@ -204,17 +231,16 @@ class V8_EXPORT_PRIVATE VirtualMemory {
void TakeControl(VirtualMemory* from);
bool InVM(Address address, size_t size) {
return (address_ <= address) && ((address_ + size_) >= (address + size));
return region_.contains(address, size);
}
private:
Address address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
};
// Page allocator that controls the virtual memory.
v8::PageAllocator* page_allocator_ = nullptr;
base::AddressRegion region_;
bool AllocVirtualMemory(size_t size, void* hint, VirtualMemory* result);
bool AlignedAllocVirtualMemory(size_t size, size_t alignment, void* hint,
VirtualMemory* result);
DISALLOW_COPY_AND_ASSIGN(VirtualMemory);
};
} // namespace internal
} // namespace v8
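VirtualMemory now carries its page allocator and an AddressRegion, is move-only, and the old AllocVirtualMemory/AlignedAllocVirtualMemory wrappers are gone. The replacement pattern is direct construction plus a move (target_ here is a hypothetical member of the adopting class):

    VirtualMemory reservation(GetPlatformPageAllocator(), size, hint);
    if (!reservation.IsReserved()) return false;
    // Move assignment transfers the region; the moved-from object is Reset().
    target_ = std::move(reservation);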

deps/v8/src/api-arguments-inl.h vendored

@ -8,6 +8,7 @@
#include "src/api-arguments.h"
#include "src/api-inl.h"
#include "src/debug/debug.h"
#include "src/objects/api-callbacks.h"
#include "src/tracing/trace-event.h"
#include "src/vm-state-inl.h"
@ -34,6 +35,10 @@ inline JSObject* PropertyCallbackArguments::holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
inline Object* PropertyCallbackArguments::receiver() {
return Object::cast(this->begin()[T::kThisIndex]);
}
inline JSObject* FunctionCallbackArguments::holder() {
return JSObject::cast(this->begin()[T::kHolderIndex]);
}
@ -47,14 +52,24 @@ inline JSObject* FunctionCallbackArguments::holder() {
DCHECK(!name->IsPrivate()); \
DCHECK_IMPLIES(name->IsSymbol(), interceptor->can_intercept_symbols());
#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE, \
CALLBACK_INFO) \
if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects && \
!ISOLATE->debug()->PerformSideEffectCheckForCallback(CALLBACK_INFO)) { \
return RETURN_VALUE(); \
} \
VMState<EXTERNAL> state(ISOLATE); \
ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
#define PREPARE_CALLBACK_INFO(ISOLATE, F, RETURN_VALUE, API_RETURN_TYPE, \
CALLBACK_INFO, RECEIVER, ACCESSOR_KIND) \
if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects && \
!ISOLATE->debug()->PerformSideEffectCheckForCallback( \
CALLBACK_INFO, RECEIVER, Debug::k##ACCESSOR_KIND)) { \
return RETURN_VALUE(); \
} \
VMState<EXTERNAL> state(ISOLATE); \
ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
PropertyCallbackInfo<API_RETURN_TYPE> callback_info(begin());
#define PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_VALUE, \
API_RETURN_TYPE) \
if (ISOLATE->debug_execution_mode() == DebugInfo::kSideEffects) { \
return RETURN_VALUE(); \
} \
VMState<EXTERNAL> state(ISOLATE); \
ExternalCallbackScope call_scope(ISOLATE, FUNCTION_ADDR(F)); \
PropertyCallbackInfo<API_RETURN_TYPE> callback_info(begin());
#define CREATE_NAMED_CALLBACK(FUNCTION, TYPE, RETURN_TYPE, API_RETURN_TYPE, \
@ -65,11 +80,13 @@ inline JSObject* FunctionCallbackArguments::holder() {
Isolate* isolate = this->isolate(); \
RuntimeCallTimerScope timer( \
isolate, RuntimeCallCounterId::kNamed##FUNCTION##Callback); \
Handle<Object> receiver_check_unsupported; \
GenericNamedProperty##FUNCTION##Callback f = \
ToCData<GenericNamedProperty##FUNCTION##Callback>( \
interceptor->TYPE()); \
PREPARE_CALLBACK_INFO(isolate, f, Handle<RETURN_TYPE>, API_RETURN_TYPE, \
INFO_FOR_SIDE_EFFECT); \
INFO_FOR_SIDE_EFFECT, receiver_check_unsupported, \
NotAccessor); \
LOG(isolate, \
ApiNamedPropertyAccess("interceptor-named-" #TYPE, holder(), *name)); \
f(v8::Utils::ToLocal(name), callback_info); \
@ -87,10 +104,12 @@ FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK)
Isolate* isolate = this->isolate(); \
RuntimeCallTimerScope timer( \
isolate, RuntimeCallCounterId::kIndexed##FUNCTION##Callback); \
Handle<Object> receiver_check_unsupported; \
IndexedProperty##FUNCTION##Callback f = \
ToCData<IndexedProperty##FUNCTION##Callback>(interceptor->TYPE()); \
PREPARE_CALLBACK_INFO(isolate, f, Handle<RETURN_TYPE>, API_RETURN_TYPE, \
INFO_FOR_SIDE_EFFECT); \
INFO_FOR_SIDE_EFFECT, receiver_check_unsupported, \
NotAccessor); \
LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-" #TYPE, \
holder(), index)); \
f(index, callback_info); \
@ -108,9 +127,11 @@ Handle<Object> FunctionCallbackArguments::Call(CallHandlerInfo* handler) {
RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback);
v8::FunctionCallback f =
v8::ToCData<v8::FunctionCallback>(handler->callback());
Handle<Object> receiver_check_unsupported;
if (isolate->debug_execution_mode() == DebugInfo::kSideEffects &&
!isolate->debug()->PerformSideEffectCheckForCallback(
handle(handler, isolate))) {
handle(handler, isolate), receiver_check_unsupported,
Debug::kNotAccessor)) {
return Handle<Object>();
}
VMState<EXTERNAL> state(isolate);
@ -167,10 +188,11 @@ Handle<Object> PropertyCallbackArguments::CallNamedDescriptor(
Handle<Object> PropertyCallbackArguments::BasicCallNamedGetterCallback(
GenericNamedPropertyGetterCallback f, Handle<Name> name,
Handle<Object> info) {
Handle<Object> info, Handle<Object> receiver) {
DCHECK(!name->IsPrivate());
Isolate* isolate = this->isolate();
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value, info);
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value, info, receiver,
Getter);
f(v8::Utils::ToLocal(name), callback_info);
return GetReturnValue<Object>(isolate);
}
@ -184,9 +206,8 @@ Handle<Object> PropertyCallbackArguments::CallNamedSetter(
Isolate* isolate = this->isolate();
RuntimeCallTimerScope timer(isolate,
RuntimeCallCounterId::kNamedSetterCallback);
Handle<Object> side_effect_check_not_supported;
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
side_effect_check_not_supported);
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
v8::Value);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-set", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
@ -202,9 +223,8 @@ Handle<Object> PropertyCallbackArguments::CallNamedDefiner(
RuntimeCallCounterId::kNamedDefinerCallback);
GenericNamedPropertyDefinerCallback f =
ToCData<GenericNamedPropertyDefinerCallback>(interceptor->definer());
Handle<Object> side_effect_check_not_supported;
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
side_effect_check_not_supported);
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
v8::Value);
LOG(isolate,
ApiNamedPropertyAccess("interceptor-named-define", holder(), *name));
f(v8::Utils::ToLocal(name), desc, callback_info);
@ -219,9 +239,8 @@ Handle<Object> PropertyCallbackArguments::CallIndexedSetter(
RuntimeCallCounterId::kIndexedSetterCallback);
IndexedPropertySetterCallback f =
ToCData<IndexedPropertySetterCallback>(interceptor->setter());
Handle<Object> side_effect_check_not_supported;
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
side_effect_check_not_supported);
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
v8::Value);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-set", holder(), index));
f(index, v8::Utils::ToLocal(value), callback_info);
@ -237,9 +256,8 @@ Handle<Object> PropertyCallbackArguments::CallIndexedDefiner(
RuntimeCallCounterId::kIndexedDefinerCallback);
IndexedPropertyDefinerCallback f =
ToCData<IndexedPropertyDefinerCallback>(interceptor->definer());
Handle<Object> side_effect_check_not_supported;
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value,
side_effect_check_not_supported);
PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle<Object>,
v8::Value);
LOG(isolate,
ApiIndexedPropertyAccess("interceptor-indexed-define", holder(), index));
f(index, desc, callback_info);
@ -275,7 +293,9 @@ Handle<Object> PropertyCallbackArguments::CallIndexedDescriptor(
Handle<Object> PropertyCallbackArguments::BasicCallIndexedGetterCallback(
IndexedPropertyGetterCallback f, uint32_t index, Handle<Object> info) {
Isolate* isolate = this->isolate();
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value, info);
Handle<Object> receiver_check_unsupported;
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, v8::Value, info,
receiver_check_unsupported, Getter);
f(index, callback_info);
return GetReturnValue<Object>(isolate);
}
@ -287,7 +307,9 @@ Handle<JSObject> PropertyCallbackArguments::CallPropertyEnumerator(
v8::ToCData<IndexedPropertyEnumeratorCallback>(interceptor->enumerator());
// TODO(cbruni): assert same type for indexed and named callback.
Isolate* isolate = this->isolate();
PREPARE_CALLBACK_INFO(isolate, f, Handle<JSObject>, v8::Array, interceptor);
Handle<Object> receiver_check_unsupported;
PREPARE_CALLBACK_INFO(isolate, f, Handle<JSObject>, v8::Array, interceptor,
receiver_check_unsupported, NotAccessor);
f(callback_info);
return GetReturnValue<JSObject>(isolate);
}
@ -303,7 +325,8 @@ Handle<Object> PropertyCallbackArguments::CallAccessorGetter(
LOG(isolate, ApiNamedPropertyAccess("accessor-getter", holder(), *name));
AccessorNameGetterCallback f =
ToCData<AccessorNameGetterCallback>(info->getter());
return BasicCallNamedGetterCallback(f, name, info);
return BasicCallNamedGetterCallback(f, name, info,
handle(receiver(), isolate));
}
Handle<Object> PropertyCallbackArguments::CallAccessorSetter(
@ -314,15 +337,15 @@ Handle<Object> PropertyCallbackArguments::CallAccessorSetter(
RuntimeCallCounterId::kAccessorSetterCallback);
AccessorNameSetterCallback f =
ToCData<AccessorNameSetterCallback>(accessor_info->setter());
Handle<Object> side_effect_check_not_supported;
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, void,
side_effect_check_not_supported);
PREPARE_CALLBACK_INFO(isolate, f, Handle<Object>, void, accessor_info,
handle(receiver(), isolate), Setter);
LOG(isolate, ApiNamedPropertyAccess("accessor-setter", holder(), *name));
f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), callback_info);
return GetReturnValue<Object>(isolate);
}
#undef PREPARE_CALLBACK_INFO
#undef PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK
} // namespace internal
} // namespace v8
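The two preparation macros encode different debug-evaluate policies. Spelled out schematically, with the macro parameters substituted:

    // PREPARE_CALLBACK_INFO (getters, enumerators, interceptors): forward the
    // receiver and accessor kind so the side-effect check can judge
    // kHasSideEffectToReceiver accessors against the actual receiver.
    if (isolate->debug_execution_mode() == DebugInfo::kSideEffects &&
        !isolate->debug()->PerformSideEffectCheckForCallback(
            callback_info, receiver, Debug::kGetter)) {
      return Handle<Object>();
    }
    // PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK (setters, definers): bail
    // out unconditionally while side-effect checking is active.
    if (isolate->debug_execution_mode() == DebugInfo::kSideEffects) {
      return Handle<Object>();
    }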

deps/v8/src/api-arguments.h vendored

@ -26,12 +26,12 @@ class CustomArguments : public CustomArgumentsBase {
public:
static const int kReturnValueOffset = T::kReturnValueIndex;
~CustomArguments() {
~CustomArguments() override {
this->begin()[kReturnValueOffset] =
reinterpret_cast<Object*>(kHandleZapValue);
}
virtual inline void IterateInstance(RootVisitor* v) {
inline void IterateInstance(RootVisitor* v) override {
v->VisitRootPointers(Root::kRelocatable, nullptr, values_,
values_ + T::kArgsLength);
}
@ -133,9 +133,10 @@ class PropertyCallbackArguments
IndexedPropertyGetterCallback f, uint32_t index, Handle<Object> info);
inline Handle<Object> BasicCallNamedGetterCallback(
GenericNamedPropertyGetterCallback f, Handle<Name> name,
Handle<Object> info);
Handle<Object> info, Handle<Object> receiver = Handle<Object>());
inline JSObject* holder();
inline Object* receiver();
// Don't copy PropertyCallbackArguments, because they would both have the
// same prev_ pointer.

deps/v8/src/api-inl.h vendored

@ -7,6 +7,7 @@
#include "src/api.h"
#include "src/objects-inl.h"
#include "src/objects/stack-frame-info.h"
namespace v8 {

deps/v8/src/api-natives.cc vendored

@ -114,9 +114,8 @@ MaybeHandle<Object> DefineDataProperty(Isolate* isolate,
}
#endif
MAYBE_RETURN_NULL(
Object::AddDataProperty(&it, value, attributes, kThrowOnError,
Object::CERTAINLY_NOT_STORE_FROM_KEYED));
MAYBE_RETURN_NULL(Object::AddDataProperty(
&it, value, attributes, kThrowOnError, StoreOrigin::kNamed));
return value;
}
@ -403,8 +402,10 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
}
Handle<JSObject> object;
ASSIGN_RETURN_ON_EXCEPTION(isolate, object,
JSObject::New(constructor, new_target), JSObject);
ASSIGN_RETURN_ON_EXCEPTION(
isolate, object,
JSObject::New(constructor, new_target, Handle<AllocationSite>::null()),
JSObject);
if (is_prototype) JSObject::OptimizeAsPrototype(object);
@ -495,8 +496,15 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
parent_prototype);
}
}
InstanceType function_type =
(!data->needs_access_check() &&
data->named_property_handler()->IsUndefined(isolate) &&
data->indexed_property_handler()->IsUndefined(isolate))
? JS_API_OBJECT_TYPE
: JS_SPECIAL_API_OBJECT_TYPE;
Handle<JSFunction> function = ApiNatives::CreateApiFunction(
isolate, data, prototype, ApiNatives::JavaScriptObjectType, maybe_name);
isolate, data, prototype, function_type, maybe_name);
if (serial_number) {
// Cache the function.
CacheTemplateInstantiation(isolate, serial_number, CachingMode::kUnlimited,
@ -625,8 +633,7 @@ void ApiNatives::AddNativeDataProperty(Isolate* isolate,
Handle<JSFunction> ApiNatives::CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype, ApiInstanceType instance_type,
MaybeHandle<Name> maybe_name) {
Handle<Object> prototype, InstanceType type, MaybeHandle<Name> maybe_name) {
Handle<SharedFunctionInfo> shared =
FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj,
maybe_name);
@ -670,33 +677,10 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
immutable_proto = instance_template->immutable_proto();
}
// TODO(svenpanne) Kill ApiInstanceType and refactor things by generalizing
// JSObject::GetHeaderSize.
int instance_size = kPointerSize * embedder_field_count;
InstanceType type;
switch (instance_type) {
case JavaScriptObjectType:
if (!obj->needs_access_check() &&
obj->named_property_handler()->IsUndefined(isolate) &&
obj->indexed_property_handler()->IsUndefined(isolate)) {
type = JS_API_OBJECT_TYPE;
} else {
type = JS_SPECIAL_API_OBJECT_TYPE;
}
instance_size += JSObject::kHeaderSize;
break;
case GlobalObjectType:
type = JS_GLOBAL_OBJECT_TYPE;
instance_size += JSGlobalObject::kSize;
break;
case GlobalProxyType:
type = JS_GLOBAL_PROXY_TYPE;
instance_size += JSGlobalProxy::kSize;
break;
default:
UNREACHABLE();
break;
}
// JS_FUNCTION_TYPE requires information about the prototype slot.
DCHECK_NE(JS_FUNCTION_TYPE, type);
int instance_size =
JSObject::GetHeaderSize(type) + kPointerSize * embedder_field_count;
Handle<Map> map = isolate->factory()->NewMap(type, instance_size,
TERMINAL_FAST_ELEMENTS_KIND);
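Dropping ApiInstanceType removes the per-type switch: the instance type is chosen up front from the template (JS_API_OBJECT_TYPE vs. JS_SPECIAL_API_OBJECT_TYPE), and the size computation generalizes through the header-size query:

    int instance_size =
        JSObject::GetHeaderSize(type) + kPointerSize * embedder_field_count;

Callers that previously used GlobalObjectType/GlobalProxyType presumably now pass JS_GLOBAL_OBJECT_TYPE or JS_GLOBAL_PROXY_TYPE to CreateApiFunction directly.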

deps/v8/src/api-natives.h vendored

@ -9,6 +9,7 @@
#include "src/base/macros.h"
#include "src/handles.h"
#include "src/maybe-handles.h"
#include "src/objects.h"
#include "src/property-details.h"
namespace v8 {
@ -33,15 +34,9 @@ class ApiNatives {
V8_WARN_UNUSED_RESULT static MaybeHandle<JSObject> InstantiateRemoteObject(
Handle<ObjectTemplateInfo> data);
enum ApiInstanceType {
JavaScriptObjectType,
GlobalObjectType,
GlobalProxyType
};
static Handle<JSFunction> CreateApiFunction(
Isolate* isolate, Handle<FunctionTemplateInfo> obj,
Handle<Object> prototype, ApiInstanceType instance_type,
Handle<Object> prototype, InstanceType type,
MaybeHandle<Name> name = MaybeHandle<Name>());
static void AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,

deps/v8/src/api.cc vendored

@ -58,7 +58,9 @@
#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/ordered-hash-table-inl.h"
#include "src/objects/stack-frame-info-inl.h"
#include "src/objects/templates.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parser.h"
#include "src/parsing/scanner-character-streams.h"
#include "src/pending-compilation-error-handler.h"
@ -834,6 +836,7 @@ StartupData SnapshotCreator::CreateBlob(
}
data->created_ = true;
DCHECK(i::Snapshot::VerifyChecksum(&result));
return result;
}
@ -876,12 +879,12 @@ void RegisteredExtension::UnregisterAll() {
namespace {
class ExtensionResource : public String::ExternalOneByteStringResource {
public:
ExtensionResource() : data_(0), length_(0) {}
ExtensionResource() : data_(nullptr), length_(0) {}
ExtensionResource(const char* data, size_t length)
: data_(data), length_(length) {}
const char* data() const { return data_; }
size_t length() const { return length_; }
virtual void Dispose() {}
const char* data() const override { return data_; }
size_t length() const override { return length_; }
void Dispose() override {}
private:
const char* data_;
@ -1391,7 +1394,7 @@ static Local<FunctionTemplate> FunctionTemplateNew(
next_serial_number = isolate->heap()->GetNextTemplateSerialNumber();
}
obj->set_serial_number(i::Smi::FromInt(next_serial_number));
if (callback != 0) {
if (callback != nullptr) {
Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type);
}
obj->set_length(length);
@ -1676,7 +1679,8 @@ static void TemplateSetAccessor(
Template* template_obj, v8::Local<Name> name, Getter getter, Setter setter,
Data data, AccessControl settings, PropertyAttribute attribute,
v8::Local<AccessorSignature> signature, bool is_special_data_property,
bool replace_on_access, SideEffectType getter_side_effect_type) {
bool replace_on_access, SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
auto info = Utils::OpenHandle(template_obj);
auto isolate = info->GetIsolate();
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate);
@ -1686,8 +1690,8 @@ static void TemplateSetAccessor(
is_special_data_property, replace_on_access);
accessor_info->set_initial_property_attributes(
static_cast<i::PropertyAttributes>(attribute));
accessor_info->set_has_no_side_effect(getter_side_effect_type ==
SideEffectType::kHasNoSideEffect);
accessor_info->set_getter_side_effect_type(getter_side_effect_type);
accessor_info->set_setter_side_effect_type(setter_side_effect_type);
i::ApiNatives::AddNativeDataProperty(isolate, info, accessor_info);
}
@ -1695,29 +1699,34 @@ void Template::SetNativeDataProperty(
v8::Local<String> name, AccessorGetterCallback getter,
AccessorSetterCallback setter, v8::Local<Value> data,
PropertyAttribute attribute, v8::Local<AccessorSignature> signature,
AccessControl settings, SideEffectType getter_side_effect_type) {
AccessControl settings, SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
signature, true, false, getter_side_effect_type);
signature, true, false, getter_side_effect_type,
setter_side_effect_type);
}
void Template::SetNativeDataProperty(
v8::Local<Name> name, AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter, v8::Local<Value> data,
PropertyAttribute attribute, v8::Local<AccessorSignature> signature,
AccessControl settings, SideEffectType getter_side_effect_type) {
AccessControl settings, SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
signature, true, false, getter_side_effect_type);
signature, true, false, getter_side_effect_type,
setter_side_effect_type);
}
void Template::SetLazyDataProperty(v8::Local<Name> name,
AccessorNameGetterCallback getter,
v8::Local<Value> data,
PropertyAttribute attribute,
SideEffectType getter_side_effect_type) {
SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
TemplateSetAccessor(this, name, getter,
static_cast<AccessorNameSetterCallback>(nullptr), data,
DEFAULT, attribute, Local<AccessorSignature>(), true,
true, getter_side_effect_type);
true, getter_side_effect_type, setter_side_effect_type);
}
void Template::SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
@ -1737,10 +1746,11 @@ void ObjectTemplate::SetAccessor(v8::Local<String> name,
v8::Local<Value> data, AccessControl settings,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature,
SideEffectType getter_side_effect_type) {
SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
signature, i::FLAG_disable_old_api_accessors, false,
getter_side_effect_type);
getter_side_effect_type, setter_side_effect_type);
}
void ObjectTemplate::SetAccessor(v8::Local<Name> name,
@ -1749,10 +1759,11 @@ void ObjectTemplate::SetAccessor(v8::Local<Name> name,
v8::Local<Value> data, AccessControl settings,
PropertyAttribute attribute,
v8::Local<AccessorSignature> signature,
SideEffectType getter_side_effect_type) {
SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
TemplateSetAccessor(this, name, getter, setter, data, settings, attribute,
signature, i::FLAG_disable_old_api_accessors, false,
getter_side_effect_type);
getter_side_effect_type, setter_side_effect_type);
}
template <typename Getter, typename Setter, typename Query, typename Descriptor,
@ -1765,15 +1776,15 @@ static i::Handle<i::InterceptorInfo> CreateInterceptorInfo(
isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE, i::TENURED));
obj->set_flags(0);
if (getter != 0) SET_FIELD_WRAPPED(isolate, obj, set_getter, getter);
if (setter != 0) SET_FIELD_WRAPPED(isolate, obj, set_setter, setter);
if (query != 0) SET_FIELD_WRAPPED(isolate, obj, set_query, query);
if (descriptor != 0)
if (getter != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_getter, getter);
if (setter != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_setter, setter);
if (query != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_query, query);
if (descriptor != nullptr)
SET_FIELD_WRAPPED(isolate, obj, set_descriptor, descriptor);
if (remover != 0) SET_FIELD_WRAPPED(isolate, obj, set_deleter, remover);
if (enumerator != 0)
if (remover != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_deleter, remover);
if (enumerator != nullptr)
SET_FIELD_WRAPPED(isolate, obj, set_enumerator, enumerator);
if (definer != 0) SET_FIELD_WRAPPED(isolate, obj, set_definer, definer);
if (definer != nullptr) SET_FIELD_WRAPPED(isolate, obj, set_definer, definer);
obj->set_can_intercept_symbols(
!(static_cast<int>(flags) &
static_cast<int>(PropertyHandlerFlags::kOnlyInterceptStrings)));
@ -2001,24 +2012,15 @@ ScriptCompiler::CachedData::~CachedData() {
}
}
bool ScriptCompiler::ExternalSourceStream::SetBookmark() { return false; }
void ScriptCompiler::ExternalSourceStream::ResetToBookmark() { UNREACHABLE(); }
ScriptCompiler::StreamedSource::StreamedSource(ExternalSourceStream* stream,
Encoding encoding)
: impl_(new i::ScriptStreamingData(stream, encoding)) {}
ScriptCompiler::StreamedSource::~StreamedSource() { delete impl_; }
const ScriptCompiler::CachedData*
ScriptCompiler::StreamedSource::GetCachedData() const {
return impl_->cached_data.get();
}
ScriptCompiler::StreamedSource::~StreamedSource() = default;
Local<Script> UnboundScript::BindToCurrentContext() {
auto function_info =
@ -2030,7 +2032,6 @@ Local<Script> UnboundScript::BindToCurrentContext() {
return ToApiHandle<Script>(function);
}
int UnboundScript::GetId() {
auto function_info =
i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
@ -2157,10 +2158,6 @@ int PrimitiveArray::Length() const {
return array->length();
}
void PrimitiveArray::Set(int index, Local<Primitive> item) {
return Set(Isolate::GetCurrent(), index, item);
}
void PrimitiveArray::Set(Isolate* v8_isolate, int index,
Local<Primitive> item) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@ -2174,10 +2171,6 @@ void PrimitiveArray::Set(Isolate* v8_isolate, int index,
array->set(index, *i_item);
}
Local<Primitive> PrimitiveArray::Get(int index) {
return Get(Isolate::GetCurrent(), index);
}
Local<Primitive> PrimitiveArray::Get(Isolate* v8_isolate, int index) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::Handle<i::FixedArray> array = Utils::OpenHandle(this);
@ -2534,6 +2527,7 @@ MaybeLocal<Function> ScriptCompiler::CompileFunctionInContext(
RETURN_ESCAPED(Utils::CallableToLocal(result));
}
void ScriptCompiler::ScriptStreamingTask::Run() { data_->task->Run(); }
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
@ -2544,10 +2538,13 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
// TODO(rmcilroy): remove CompileOptions from the API.
CHECK(options == ScriptCompiler::kNoCompileOptions);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
return i::Compiler::NewBackgroundCompileTask(source->impl(), isolate);
i::ScriptStreamingData* data = source->impl();
std::unique_ptr<i::BackgroundCompileTask> task =
base::make_unique<i::BackgroundCompileTask>(data, isolate);
data->task = std::move(task);
return new ScriptCompiler::ScriptStreamingTask(data);
}
MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
StreamedSource* v8_source,
Local<String> full_source_string,
@ -2562,11 +2559,11 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
isolate, origin.ResourceName(), origin.ResourceLineOffset(),
origin.ResourceColumnOffset(), origin.SourceMapUrl(),
origin.HostDefinedOptions());
i::ScriptStreamingData* streaming_data = v8_source->impl();
i::ScriptStreamingData* data = v8_source->impl();
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
i::Compiler::GetSharedFunctionInfoForStreamedScript(
isolate, str, script_details, origin.Options(), streaming_data);
isolate, str, script_details, origin.Options(), data);
i::Handle<i::SharedFunctionInfo> result;
has_pending_exception = !maybe_function_info.ToHandle(&result);
@ -2908,10 +2905,6 @@ void Message::PrintCurrentStackTrace(Isolate* isolate, FILE* out) {
// --- S t a c k T r a c e ---
Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
return GetFrame(Isolate::GetCurrent(), index);
}
Local<StackFrame> StackTrace::GetFrame(Isolate* v8_isolate,
uint32_t index) const {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@ -3572,17 +3565,20 @@ MaybeLocal<BigInt> Value::ToBigInt(Local<Context> context) const {
RETURN_ESCAPED(result);
}
bool Value::BooleanValue(Isolate* v8_isolate) const {
return Utils::OpenHandle(this)->BooleanValue(
reinterpret_cast<i::Isolate*>(v8_isolate));
}
MaybeLocal<Boolean> Value::ToBoolean(Local<Context> context) const {
auto obj = Utils::OpenHandle(this);
if (obj->IsBoolean()) return ToApiHandle<Boolean>(obj);
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
auto val = isolate->factory()->ToBoolean(obj->BooleanValue(isolate));
return ToApiHandle<Boolean>(val);
return ToBoolean(context->GetIsolate());
}
Local<Boolean> Value::ToBoolean(Isolate* v8_isolate) const {
return ToBoolean(v8_isolate->GetCurrentContext()).ToLocalChecked();
auto isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
return ToApiHandle<Boolean>(
isolate->factory()->ToBoolean(BooleanValue(v8_isolate)));
}
@ -3888,36 +3884,6 @@ void v8::RegExp::CheckCast(v8::Value* that) {
}
bool Value::BooleanValue() const {
return BooleanValue(Isolate::GetCurrent()->GetCurrentContext())
.FromJust();
}
double Value::NumberValue() const {
return NumberValue(Isolate::GetCurrent()->GetCurrentContext())
.FromMaybe(std::numeric_limits<double>::quiet_NaN());
}
int64_t Value::IntegerValue() const {
return IntegerValue(Isolate::GetCurrent()->GetCurrentContext())
.FromMaybe(0);
}
uint32_t Value::Uint32Value() const {
return Uint32Value(Isolate::GetCurrent()->GetCurrentContext())
.FromMaybe(0);
}
int32_t Value::Int32Value() const {
return Int32Value(Isolate::GetCurrent()->GetCurrentContext())
.FromMaybe(0);
}
Maybe<bool> Value::BooleanValue(Local<Context> context) const {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
return Just(Utils::OpenHandle(this)->BooleanValue(isolate));
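All of the Isolate::GetCurrent()-based convenience overloads above are removed; embedders must pass an isolate or context explicitly. A migration sketch (val, isolate, and context are the embedder's own handles; <limits> provides quiet_NaN):

    // Removed:  val->BooleanValue();  val->NumberValue();
    bool b = val->BooleanValue(isolate);
    double d = val->NumberValue(context).FromMaybe(
        std::numeric_limits<double>::quiet_NaN());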
@ -4006,12 +3972,6 @@ MaybeLocal<Uint32> Value::ToArrayIndex(Local<Context> context) const {
}
bool Value::Equals(Local<Value> that) const {
return Equals(Isolate::GetCurrent()->GetCurrentContext(), that)
.FromMaybe(false);
}
Maybe<bool> Value::Equals(Local<Context> context, Local<Value> that) const {
i::Isolate* isolate = Utils::OpenHandle(*context)->GetIsolate();
auto self = Utils::OpenHandle(this);
@ -4063,7 +4023,8 @@ Maybe<bool> v8::Object::Set(v8::Local<v8::Context> context,
auto value_obj = Utils::OpenHandle(*value);
has_pending_exception =
i::Runtime::SetObjectProperty(isolate, self, key_obj, value_obj,
i::LanguageMode::kSloppy)
i::LanguageMode::kSloppy,
i::StoreOrigin::kMaybeKeyed)
.is_null();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return Just(true);
@ -4617,8 +4578,8 @@ static Maybe<bool> ObjectSetAccessor(
Local<Context> context, Object* self, Local<Name> name, Getter getter,
Setter setter, Data data, AccessControl settings,
PropertyAttribute attributes, bool is_special_data_property,
bool replace_on_access,
SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect) {
bool replace_on_access, SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8_NO_SCRIPT(isolate, context, Object, SetAccessor, Nothing<bool>(),
i::HandleScope);
@ -4629,8 +4590,8 @@ static Maybe<bool> ObjectSetAccessor(
i::Handle<i::AccessorInfo> info =
MakeAccessorInfo(isolate, name, getter, setter, data, settings, signature,
is_special_data_property, replace_on_access);
info->set_has_no_side_effect(getter_side_effect_type ==
SideEffectType::kHasNoSideEffect);
info->set_getter_side_effect_type(getter_side_effect_type);
info->set_setter_side_effect_type(setter_side_effect_type);
if (info.is_null()) return Nothing<bool>();
bool fast = obj->HasFastProperties();
i::Handle<i::Object> result;
@ -4653,11 +4614,12 @@ Maybe<bool> Object::SetAccessor(Local<Context> context, Local<Name> name,
AccessorNameSetterCallback setter,
MaybeLocal<Value> data, AccessControl settings,
PropertyAttribute attribute,
SideEffectType getter_side_effect_type) {
SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
return ObjectSetAccessor(context, this, name, getter, setter,
data.FromMaybe(Local<Value>()), settings, attribute,
i::FLAG_disable_old_api_accessors, false,
getter_side_effect_type);
getter_side_effect_type, setter_side_effect_type);
}
@ -4684,19 +4646,22 @@ Maybe<bool> Object::SetNativeDataProperty(
v8::Local<v8::Context> context, v8::Local<Name> name,
AccessorNameGetterCallback getter, AccessorNameSetterCallback setter,
v8::Local<Value> data, PropertyAttribute attributes,
SideEffectType getter_side_effect_type) {
SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
return ObjectSetAccessor(context, this, name, getter, setter, data, DEFAULT,
attributes, true, false, getter_side_effect_type);
attributes, true, false, getter_side_effect_type,
setter_side_effect_type);
}
Maybe<bool> Object::SetLazyDataProperty(
v8::Local<v8::Context> context, v8::Local<Name> name,
AccessorNameGetterCallback getter, v8::Local<Value> data,
PropertyAttribute attributes, SideEffectType getter_side_effect_type) {
PropertyAttribute attributes, SideEffectType getter_side_effect_type,
SideEffectType setter_side_effect_type) {
return ObjectSetAccessor(context, this, name, getter,
static_cast<AccessorNameSetterCallback>(nullptr),
data, DEFAULT, attributes, true, true,
getter_side_effect_type);
getter_side_effect_type, setter_side_effect_type);
}
Maybe<bool> v8::Object::HasOwnProperty(Local<Context> context,
@ -5343,10 +5308,6 @@ bool String::ContainsOnlyOneByte() const {
return helper.Check(*str);
}
int String::Utf8Length() const {
return Utf8Length(Isolate::GetCurrent());
}
int String::Utf8Length(Isolate* isolate) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
str = i::String::Flatten(reinterpret_cast<i::Isolate*>(isolate), str);
@ -5570,14 +5531,6 @@ static bool RecursivelySerializeToUtf8(i::String* current,
return true;
}
int String::WriteUtf8(char* buffer, int capacity,
int* nchars_ref, int options) const {
return WriteUtf8(Isolate::GetCurrent(),
buffer, capacity, nchars_ref, options);
}
int String::WriteUtf8(Isolate* v8_isolate, char* buffer, int capacity,
int* nchars_ref, int options) const {
i::Handle<i::String> str = Utils::OpenHandle(this);
@ -5645,18 +5598,6 @@ static inline int WriteHelper(i::Isolate* isolate, const String* string,
}
int String::WriteOneByte(uint8_t* buffer, int start,
int length, int options) const {
return WriteOneByte(Isolate::GetCurrent(), buffer, start, length, options);
}
int String::Write(uint16_t* buffer, int start, int length,
int options) const {
return Write(Isolate::GetCurrent(), buffer, start, length, options);
}
int String::WriteOneByte(Isolate* isolate, uint8_t* buffer, int start,
int length, int options) const {
return WriteHelper(reinterpret_cast<i::Isolate*>(isolate), this, buffer,
@ -6010,16 +5951,16 @@ HeapStatistics::HeapStatistics()
malloced_memory_(0),
external_memory_(0),
peak_malloced_memory_(0),
does_zap_garbage_(0),
does_zap_garbage_(false),
number_of_native_contexts_(0),
number_of_detached_contexts_(0) {}
HeapSpaceStatistics::HeapSpaceStatistics(): space_name_(0),
space_size_(0),
space_used_size_(0),
space_available_size_(0),
physical_space_size_(0) { }
HeapSpaceStatistics::HeapSpaceStatistics()
: space_name_(nullptr),
space_size_(0),
space_used_size_(0),
space_available_size_(0),
physical_space_size_(0) {}
HeapObjectStatistics::HeapObjectStatistics()
: object_type_(nullptr),
@ -6604,11 +6545,6 @@ MaybeLocal<String> String::NewFromTwoByte(Isolate* isolate,
return result;
}
Local<String> v8::String::Concat(Local<String> left,
Local<String> right) {
return Concat(Isolate::GetCurrent(), left, right);
}
Local<String> v8::String::Concat(Isolate* v8_isolate, Local<String> left,
Local<String> right) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
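
The overloads deleted above (Utf8Length, WriteUtf8, Write, WriteOneByte, Concat, and the String::Utf8Value/String::Value constructors further down in this file) were the deprecated variants that silently fell back to Isolate::GetCurrent(). Callers now pass the isolate explicitly; a migration sketch, assuming `isolate` and a `Local<String> str` are in scope:

    int len = str->Utf8Length(isolate);        // was: str->Utf8Length()
    Local<String> twice =
        String::Concat(isolate, str, str);     // was: String::Concat(str, str)
    String::Utf8Value utf8(isolate, str);      // was: String::Utf8Value(str)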
@ -6793,7 +6729,6 @@ double v8::NumberObject::ValueOf() const {
}
Local<v8::Value> v8::BigIntObject::New(Isolate* isolate, int64_t value) {
CHECK(i::FLAG_harmony_bigint);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, BigIntObject, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@ -6835,11 +6770,6 @@ bool v8::BooleanObject::ValueOf() const {
}
Local<v8::Value> v8::StringObject::New(Local<String> value) {
return New(Isolate::GetCurrent(), value);
}
Local<v8::Value> v8::StringObject::New(Isolate* v8_isolate,
Local<String> value) {
i::Handle<i::String> string = Utils::OpenHandle(*value);
@ -6981,23 +6911,6 @@ Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
return Utils::ToLocal(obj);
}
Local<v8::Array> v8::Array::New(Isolate* isolate, Local<Value>* elements,
size_t length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Factory* factory = i_isolate->factory();
LOG_API(i_isolate, Array, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
int len = static_cast<int>(length);
i::Handle<i::FixedArray> result = factory->NewFixedArray(len);
for (int i = 0; i < len; i++) {
i::Handle<i::Object> element = Utils::OpenHandle(*elements[i]);
result->set(i, *element);
}
return Utils::ToLocal(
factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, len));
}
uint32_t v8::Array::Length() const {
i::Handle<i::JSArray> obj = Utils::OpenHandle(this);
@ -7103,30 +7016,30 @@ i::Handle<i::JSArray> MapAsArray(i::Isolate* isolate, i::Object* table_obj,
i::Factory* factory = isolate->factory();
i::Handle<i::OrderedHashMap> table(i::OrderedHashMap::cast(table_obj),
isolate);
const bool collect_keys =
kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kKeys;
const bool collect_values =
kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kValues;
int capacity = table->UsedCapacity();
int max_length =
(capacity - offset) * ((collect_keys && collect_values) ? 2 : 1);
i::Handle<i::FixedArray> result = factory->NewFixedArray(max_length);
if (offset >= table->NumberOfElements()) return factory->NewJSArray(0);
int length = (table->NumberOfElements() - offset) *
(kind == MapAsArrayKind::kEntries ? 2 : 1);
i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
int result_index = 0;
{
i::DisallowHeapAllocation no_gc;
int capacity = table->UsedCapacity();
i::Oddball* the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
for (int i = offset; i < capacity; ++i) {
for (int i = 0; i < capacity; ++i) {
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
if (collect_keys) result->set(result_index++, key);
if (collect_values) result->set(result_index++, table->ValueAt(i));
if (offset-- > 0) continue;
if (kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kKeys) {
result->set(result_index++, key);
}
if (kind == MapAsArrayKind::kEntries || kind == MapAsArrayKind::kValues) {
result->set(result_index++, table->ValueAt(i));
}
}
}
DCHECK_GE(max_length, result_index);
if (result_index == 0) return factory->NewJSArray(0);
result->Shrink(isolate, result_index);
return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS,
result_index);
DCHECK_EQ(result_index, result->length());
DCHECK_EQ(result_index, length);
return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, length);
}
} // namespace
@ -7211,26 +7124,24 @@ i::Handle<i::JSArray> SetAsArray(i::Isolate* isolate, i::Object* table_obj,
i::Factory* factory = isolate->factory();
i::Handle<i::OrderedHashSet> table(i::OrderedHashSet::cast(table_obj),
isolate);
// Elements skipped by |offset| may already be deleted.
int capacity = table->UsedCapacity();
int max_length = capacity - offset;
if (max_length == 0) return factory->NewJSArray(0);
i::Handle<i::FixedArray> result = factory->NewFixedArray(max_length);
int length = table->NumberOfElements() - offset;
if (length <= 0) return factory->NewJSArray(0);
i::Handle<i::FixedArray> result = factory->NewFixedArray(length);
int result_index = 0;
{
i::DisallowHeapAllocation no_gc;
int capacity = table->UsedCapacity();
i::Oddball* the_hole = i::ReadOnlyRoots(isolate).the_hole_value();
for (int i = offset; i < capacity; ++i) {
for (int i = 0; i < capacity; ++i) {
i::Object* key = table->KeyAt(i);
if (key == the_hole) continue;
if (offset-- > 0) continue;
result->set(result_index++, key);
}
}
DCHECK_GE(max_length, result_index);
if (result_index == 0) return factory->NewJSArray(0);
result->Shrink(isolate, result_index);
return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS,
result_index);
DCHECK_EQ(result_index, result->length());
DCHECK_EQ(result_index, length);
return factory->NewJSArrayWithElements(result, i::PACKED_ELEMENTS, length);
}
} // namespace
@ -7501,7 +7412,7 @@ class AsyncCompilationResolver : public i::wasm::CompilationResultResolver {
reinterpret_cast<i::Isolate*>(isolate)->global_handles()->Create(
*Utils::OpenHandle(*promise))) {}
~AsyncCompilationResolver() {
~AsyncCompilationResolver() override {
i::GlobalHandles::Destroy(i::Handle<i::Object>::cast(promise_).location());
}
@ -7540,9 +7451,6 @@ void WasmModuleObjectBuilderStreaming::Finish() {
void WasmModuleObjectBuilderStreaming::Abort(MaybeLocal<Value> exception) {
}
WasmModuleObjectBuilderStreaming::~WasmModuleObjectBuilderStreaming() {
}
// static
v8::ArrayBuffer::Allocator* v8::ArrayBuffer::Allocator::NewDefaultAllocator() {
return new ArrayBufferAllocator();
@ -7602,9 +7510,8 @@ void ArrayBufferDeleter(void* buffer, size_t length, void* info) {
v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
Contents contents(
self->backing_store(), byte_length, self->allocation_base(),
self->backing_store(), self->byte_length(), self->allocation_base(),
self->allocation_length(),
self->is_wasm_memory() ? Allocator::AllocationMode::kReservation
: Allocator::AllocationMode::kNormal,
@ -7632,7 +7539,7 @@ void v8::ArrayBuffer::Neuter() {
size_t v8::ArrayBuffer::ByteLength() const {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_length()->Number());
return obj->byte_length();
}
@ -7656,6 +7563,7 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
ArrayBufferCreationMode mode) {
// Embedders must guarantee that the external backing store is valid.
CHECK(byte_length == 0 || data != nullptr);
CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
@ -7687,9 +7595,8 @@ Local<ArrayBuffer> v8::ArrayBufferView::Buffer() {
size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
size_t byte_offset = i::NumberToSize(self->byte_offset());
size_t bytes_to_copy =
i::Min(byte_length, i::NumberToSize(self->byte_length()));
size_t byte_offset = self->byte_offset();
size_t bytes_to_copy = i::Min(byte_length, self->byte_length());
if (bytes_to_copy) {
i::DisallowHeapAllocation no_gc;
i::Isolate* isolate = self->GetIsolate();
@ -7720,19 +7627,19 @@ bool v8::ArrayBufferView::HasBuffer() const {
size_t v8::ArrayBufferView::ByteOffset() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_offset()->Number());
return obj->WasNeutered() ? 0 : obj->byte_offset();
}
size_t v8::ArrayBufferView::ByteLength() {
i::Handle<i::JSArrayBufferView> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_length()->Number());
return obj->WasNeutered() ? 0 : obj->byte_length();
}
size_t v8::TypedArray::Length() {
i::Handle<i::JSTypedArray> obj = Utils::OpenHandle(this);
return obj->length_value();
return obj->WasNeutered() ? 0 : obj->length_value();
}
static_assert(v8::TypedArray::kMaxLength == i::Smi::kMaxValue,
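
Since byte_length now lives in an untagged size_t field, the view accessors above also check WasNeutered() and report 0 for detached buffers instead of reading stale values. A behavioral sketch, assuming `isolate` with an active handle scope and context; note that Neuter() requires an externalized buffer, so the embedder temporarily owns the store:

    Local<ArrayBuffer> ab = ArrayBuffer::New(isolate, 16);
    Local<Uint8Array> view = Uint8Array::New(ab, 0, 16);
    ArrayBuffer::Contents contents = ab->Externalize();
    ab->Neuter();  // detach the backing store
    CHECK_EQ(0u, view->ByteLength());  // ByteOffset() and Length() read 0 too
    // contents.Data() must eventually be freed by the embedder.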
@ -7840,9 +7747,8 @@ v8::SharedArrayBuffer::Contents::Contents(
v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
size_t byte_length = static_cast<size_t>(self->byte_length()->Number());
Contents contents(
self->backing_store(), byte_length, self->allocation_base(),
self->backing_store(), self->byte_length(), self->allocation_base(),
self->allocation_length(),
self->is_wasm_memory()
? ArrayBuffer::Allocator::AllocationMode::kReservation
@ -7858,7 +7764,7 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
size_t v8::SharedArrayBuffer::ByteLength() const {
i::Handle<i::JSArrayBuffer> obj = Utils::OpenHandle(this);
return static_cast<size_t>(obj->byte_length()->Number());
return obj->byte_length();
}
Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
@ -7912,8 +7818,8 @@ Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
return Utils::ToLocal(i_isolate->SymbolFor(
i::Heap::kPublicSymbolTableRootIndex, i_name, false));
return Utils::ToLocal(
i_isolate->SymbolFor(i::RootIndex::kPublicSymbolTable, i_name, false));
}
@ -7921,10 +7827,11 @@ Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
return Utils::ToLocal(
i_isolate->SymbolFor(i::Heap::kApiSymbolTableRootIndex, i_name, false));
i_isolate->SymbolFor(i::RootIndex::kApiSymbolTable, i_name, false));
}
#define WELL_KNOWN_SYMBOLS(V) \
V(AsyncIterator, async_iterator) \
V(HasInstance, has_instance) \
V(IsConcatSpreadable, is_concat_spreadable) \
V(Iterator, iterator) \
@ -7961,8 +7868,8 @@ Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
Local<Private> v8::Private::ForApi(Isolate* isolate, Local<String> name) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
i::Handle<i::String> i_name = Utils::OpenHandle(*name);
Local<Symbol> result = Utils::ToLocal(i_isolate->SymbolFor(
i::Heap::kApiPrivateSymbolTableRootIndex, i_name, true));
Local<Symbol> result = Utils::ToLocal(
i_isolate->SymbolFor(i::RootIndex::kApiPrivateSymbolTable, i_name, true));
return v8::Local<Private>(reinterpret_cast<Private*>(*result));
}
@ -8003,7 +7910,6 @@ Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
}
Local<BigInt> v8::BigInt::New(Isolate* isolate, int64_t value) {
CHECK(i::FLAG_harmony_bigint);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
i::Handle<i::BigInt> result = i::BigInt::FromInt64(internal_isolate, value);
@ -8011,7 +7917,6 @@ Local<BigInt> v8::BigInt::New(Isolate* isolate, int64_t value) {
}
Local<BigInt> v8::BigInt::NewFromUnsigned(Isolate* isolate, uint64_t value) {
CHECK(i::FLAG_harmony_bigint);
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(internal_isolate);
i::Handle<i::BigInt> result = i::BigInt::FromUint64(internal_isolate, value);
@ -8021,7 +7926,6 @@ Local<BigInt> v8::BigInt::NewFromUnsigned(Isolate* isolate, uint64_t value) {
MaybeLocal<BigInt> v8::BigInt::NewFromWords(Local<Context> context,
int sign_bit, int word_count,
const uint64_t* words) {
CHECK(i::FLAG_harmony_bigint);
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
ENTER_V8_NO_SCRIPT(isolate, context, BigInt, NewFromWords,
MaybeLocal<BigInt>(), InternalEscapableScope);
@ -8186,6 +8090,11 @@ void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
isolate->heap()->SetEmbedderHeapTracer(tracer);
}
EmbedderHeapTracer* Isolate::GetEmbedderHeapTracer() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return isolate->heap()->GetEmbedderHeapTracer();
}
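
GetEmbedderHeapTracer is the read-side counterpart of SetEmbedderHeapTracer and returns nullptr until a tracer is registered. A sketch, assuming a hypothetical MyTracer subclass that overrides the pure-virtual tracing hooks:

    MyTracer tracer;  // assumed v8::EmbedderHeapTracer subclass
    CHECK_NULL(isolate->GetEmbedderHeapTracer());
    isolate->SetEmbedderHeapTracer(&tracer);
    CHECK_EQ(&tracer, isolate->GetEmbedderHeapTracer());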
void Isolate::SetGetExternallyAllocatedMemoryInBytesCallback(
GetExternallyAllocatedMemoryInBytesCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
@ -8225,9 +8134,9 @@ void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) {
kGCCallbackFlagForced);
} else {
DCHECK_EQ(kFullGarbageCollection, type);
reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
i::Heap::kAbortIncrementalMarkingMask,
i::GarbageCollectionReason::kTesting, kGCCallbackFlagForced);
reinterpret_cast<i::Isolate*>(this)->heap()->PreciseCollectAllGarbage(
i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting,
kGCCallbackFlagForced);
}
}
@ -8296,7 +8205,11 @@ void Isolate::Initialize(Isolate* isolate,
if (params.entry_hook || !i::Snapshot::Initialize(i_isolate)) {
// If snapshot data was provided and we failed to deserialize it must
// have been corrupted.
CHECK_NULL(i_isolate->snapshot_blob());
if (i_isolate->snapshot_blob() != nullptr) {
FATAL(
"Failed to deserialize the V8 snapshot blob. This can mean that the "
"snapshot blob file is corrupted or missing.");
}
base::ElapsedTimer timer;
if (i::FLAG_profile_deserialization) timer.Start();
i_isolate->Init(nullptr);
@ -8366,6 +8279,11 @@ void Isolate::SetHostInitializeImportMetaObjectCallback(
isolate->SetHostInitializeImportMetaObjectCallback(callback);
}
void Isolate::SetPrepareStackTraceCallback(PrepareStackTraceCallback callback) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
isolate->SetPrepareStackTraceCallback(callback);
}
Isolate::DisallowJavascriptExecutionScope::DisallowJavascriptExecutionScope(
Isolate* isolate,
Isolate::DisallowJavascriptExecutionScope::OnFailure on_failure)
@ -8791,17 +8709,17 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) {
void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
if (isolate->heap()->memory_allocator()->code_range()->valid()) {
*start = reinterpret_cast<void*>(
isolate->heap()->memory_allocator()->code_range()->start());
*length_in_bytes =
isolate->heap()->memory_allocator()->code_range()->size();
} else {
*start = nullptr;
*length_in_bytes = 0;
}
const base::AddressRegion& code_range =
isolate->heap()->memory_allocator()->code_range();
*start = reinterpret_cast<void*>(code_range.begin());
*length_in_bytes = code_range.size();
}
MemoryRange Isolate::GetEmbeddedCodeRange() {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
return {reinterpret_cast<const void*>(isolate->embedded_blob()),
isolate->embedded_blob_size()};
}
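
GetCodeRange now reads the allocator's code region unconditionally (an empty region yields a null start and zero size), and the new GetEmbeddedCodeRange exposes the embedded-builtins blob. A query sketch, assuming MemoryRange's `start`/`length_in_bytes` members:

    void* start = nullptr;
    size_t size = 0;
    isolate->GetCodeRange(&start, &size);
    v8::MemoryRange embedded = isolate->GetEmbeddedCodeRange();
    // embedded.start / embedded.length_in_bytes cover the embedded blob.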
#define CALLBACK_SETTER(ExternalName, Type, InternalName) \
void Isolate::Set##ExternalName(Type callback) { \
@ -8986,9 +8904,6 @@ bool MicrotasksScope::IsRunningMicrotasks(Isolate* v8Isolate) {
return isolate->IsRunningMicrotasks();
}
String::Utf8Value::Utf8Value(v8::Local<v8::Value> obj)
: Utf8Value(Isolate::GetCurrent(), obj) {}
String::Utf8Value::Utf8Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
: str_(nullptr), length_(0) {
if (obj.IsEmpty()) return;
@ -9008,9 +8923,6 @@ String::Utf8Value::~Utf8Value() {
i::DeleteArray(str_);
}
String::Value::Value(v8::Local<v8::Value> obj)
: Value(Isolate::GetCurrent(), obj) {}
String::Value::Value(v8::Isolate* isolate, v8::Local<v8::Value> obj)
: str_(nullptr), length_(0) {
if (obj.IsEmpty()) return;
@ -9192,7 +9104,10 @@ int debug::Script::ColumnOffset() const {
std::vector<int> debug::Script::LineEnds() const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) return std::vector<int>();
if (script->type() == i::Script::TYPE_WASM &&
this->SourceMappingURL().IsEmpty()) {
return std::vector<int>();
}
i::Isolate* isolate = script->GetIsolate();
i::HandleScope scope(isolate);
i::Script::InitLineEnds(script);
@ -9281,7 +9196,8 @@ bool debug::Script::GetPossibleBreakpoints(
std::vector<debug::BreakLocation>* locations) const {
CHECK(!start.IsEmpty());
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
if (script->type() == i::Script::TYPE_WASM &&
this->SourceMappingURL().IsEmpty()) {
i::WasmModuleObject* module_object =
i::WasmModuleObject::cast(script->wasm_module_object());
return module_object->GetPossibleBreakpoints(start, end, locations);
@ -9332,9 +9248,13 @@ bool debug::Script::GetPossibleBreakpoints(
int debug::Script::GetSourceOffset(const debug::Location& location) const {
i::Handle<i::Script> script = Utils::OpenHandle(this);
if (script->type() == i::Script::TYPE_WASM) {
return i::WasmModuleObject::cast(script->wasm_module_object())
->GetFunctionOffset(location.GetLineNumber()) +
location.GetColumnNumber();
if (this->SourceMappingURL().IsEmpty()) {
return i::WasmModuleObject::cast(script->wasm_module_object())
->GetFunctionOffset(location.GetLineNumber()) +
location.GetColumnNumber();
}
DCHECK_EQ(0, location.GetLineNumber());
return location.GetColumnNumber();
}
int line = std::max(location.GetLineNumber() - script->line_offset(), 0);
@ -9777,10 +9697,10 @@ int debug::GetNativeAccessorDescriptor(v8::Local<v8::Context> context,
}
auto isolate = reinterpret_cast<i::Isolate*>(context->GetIsolate());
int result = 0;
#define IS_BUILTIN_ACESSOR(name, _) \
#define IS_BUILTIN_ACESSOR(_, name, ...) \
if (*structure == *isolate->factory()->name##_accessor()) \
result |= static_cast<int>(debug::NativeAccessorType::IsBuiltin);
ACCESSOR_INFO_LIST(IS_BUILTIN_ACESSOR)
ACCESSOR_INFO_LIST_GENERATOR(IS_BUILTIN_ACESSOR, /* not used */)
#undef IS_BUILTIN_ACESSOR
i::Handle<i::AccessorInfo> accessor_info =
i::Handle<i::AccessorInfo>::cast(structure);
@ -9826,7 +9746,7 @@ debug::PostponeInterruptsScope::PostponeInterruptsScope(v8::Isolate* isolate)
new i::PostponeInterruptsScope(reinterpret_cast<i::Isolate*>(isolate),
i::StackGuard::API_INTERRUPT)) {}
debug::PostponeInterruptsScope::~PostponeInterruptsScope() {}
debug::PostponeInterruptsScope::~PostponeInterruptsScope() = default;
Local<String> CpuProfileNode::GetFunctionName() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
@ -9950,6 +9870,47 @@ debug::TypeProfile::ScriptData debug::TypeProfile::GetScriptData(
return ScriptData(i, type_profile_);
}
v8::MaybeLocal<v8::Value> debug::WeakMap::Get(v8::Local<v8::Context> context,
v8::Local<v8::Value> key) {
PREPARE_FOR_EXECUTION(context, WeakMap, Get, Value);
auto self = Utils::OpenHandle(this);
Local<Value> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key)};
has_pending_exception =
!ToLocal<Value>(i::Execution::Call(isolate, isolate->weakmap_get(), self,
arraysize(argv), argv),
&result);
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
v8::MaybeLocal<debug::WeakMap> debug::WeakMap::Set(
v8::Local<v8::Context> context, v8::Local<v8::Value> key,
v8::Local<v8::Value> value) {
PREPARE_FOR_EXECUTION(context, WeakMap, Set, WeakMap);
auto self = Utils::OpenHandle(this);
i::Handle<i::Object> result;
i::Handle<i::Object> argv[] = {Utils::OpenHandle(*key),
Utils::OpenHandle(*value)};
has_pending_exception = !i::Execution::Call(isolate, isolate->weakmap_set(),
self, arraysize(argv), argv)
.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(WeakMap);
RETURN_ESCAPED(Local<WeakMap>::Cast(Utils::ToLocal(result)));
}
Local<debug::WeakMap> debug::WeakMap::New(v8::Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, WeakMap, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::Handle<i::JSWeakMap> obj = i_isolate->factory()->NewJSWeakMap();
return ToApiHandle<debug::WeakMap>(obj);
}
debug::WeakMap* debug::WeakMap::Cast(v8::Value* value) {
return static_cast<debug::WeakMap*>(value);
}
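
debug::WeakMap wraps the intrinsic WeakMap get/set for inspector use, so an entry holds its value only while the key object is otherwise reachable. A round-trip sketch, assuming `isolate`, `context`, an object `key`, and a `value` handle:

    Local<debug::WeakMap> map = debug::WeakMap::New(isolate);
    map->Set(context, key, value).ToLocalChecked();
    Local<Value> stored;
    if (map->Get(context, key).ToLocal(&stored)) {
      // stored == value while key is strongly held elsewhere.
    }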
const char* CpuProfileNode::GetFunctionNameStr() const {
const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this);
return node->entry()->name();
@ -10134,11 +10095,6 @@ void CpuProfiler::SetIdle(bool is_idle) {
isolate->SetIdle(is_idle);
}
void CpuProfiler::UseDetailedSourcePositionsForProfiling(Isolate* isolate) {
reinterpret_cast<i::Isolate*>(isolate)
->set_detailed_source_positions_for_profiling(true);
}
uintptr_t CodeEvent::GetCodeStartAddress() {
return reinterpret_cast<i::CodeEvent*>(this)->code_start_address;
}
@ -10546,9 +10502,9 @@ void EmbedderHeapTracer::GarbageCollectionForTesting(
CHECK(i::FLAG_expose_gc);
i::Heap* const heap = reinterpret_cast<i::Isolate*>(isolate_)->heap();
heap->SetEmbedderStackStateForNextFinalizaton(stack_state);
heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask,
i::GarbageCollectionReason::kTesting,
kGCCallbackFlagForced);
heap->PreciseCollectAllGarbage(i::Heap::kNoGCFlags,
i::GarbageCollectionReason::kTesting,
kGCCallbackFlagForced);
}
bool EmbedderHeapTracer::AdvanceTracing(double deadline_in_ms) {

1
deps/v8/src/api.h vendored

@ -116,6 +116,7 @@ class RegisteredExtension {
V(Proxy, JSProxy) \
V(debug::GeneratorObject, JSGeneratorObject) \
V(debug::Script, Script) \
V(debug::WeakMap, JSWeakMap) \
V(Promise, JSPromise) \
V(Primitive, Object) \
V(PrimitiveArray, FixedArray) \


@ -27,7 +27,7 @@ namespace internal {
// Note that length_ (whose value is in the integer range) is defined
// as intptr_t to provide endian-neutrality on 64-bit archs.
class Arguments BASE_EMBEDDED {
class Arguments {
public:
Arguments(int length, Object** arguments)
: length_(length), arguments_(arguments) {


@ -46,6 +46,7 @@
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/string-constants.h"
namespace v8 {
namespace internal {
@ -417,6 +418,13 @@ Operand Operand::EmbeddedCode(CodeStub* stub) {
return result;
}
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.is_heap_object_request_ = true;
result.value_.heap_object_request = HeapObjectRequest(str);
return result;
}
MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am)
: rn_(rn), rm_(no_reg), offset_(offset), am_(am) {
// Accesses below the stack pointer are not safe, and are prohibited by the
@ -472,6 +480,7 @@ void NeonMemOperand::SetAlignment(int align) {
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Handle<HeapObject> object;
switch (request.kind()) {
@ -483,6 +492,12 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->set_isolate(isolate);
object = request.code_stub()->GetCode();
break;
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
object = str->AllocateStringConstant(isolate);
break;
}
}
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
Memory<Address>(constant_pool_entry_address(pc, 0 /* unused */)) =
@ -1418,7 +1433,7 @@ int Assembler::branch_offset(Label* L) {
// Branch instructions.
void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
RecordRelocInfo(rmode);
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
const bool b_imm_check = is_int24(imm24);
@ -1432,7 +1447,7 @@ void Assembler::b(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
}
void Assembler::bl(int branch_offset, Condition cond, RelocInfo::Mode rmode) {
RecordRelocInfo(rmode);
if (!RelocInfo::IsNone(rmode)) RecordRelocInfo(rmode);
DCHECK_EQ(branch_offset & 3, 0);
int imm24 = branch_offset >> 2;
const bool bl_imm_check = is_int24(imm24);
@ -5103,13 +5118,7 @@ void Assembler::dq(uint64_t value) {
}
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
if (options().disable_reloc_info_for_patching) return;
if (RelocInfo::IsNone(rmode) ||
// Don't record external references unless the heap will be serialized.
(RelocInfo::IsOnlyForSerializer(rmode) &&
!options().record_reloc_info_for_serialization && !emit_debug_code())) {
return;
}
if (!ShouldRecordRelocInfo(rmode)) return;
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
reloc_info_writer.Write(&rinfo);


@ -393,7 +393,7 @@ enum Coprocessor {
// Machine instruction Operands
// Class Operand represents a shifter operand in data processing instructions
class Operand BASE_EMBEDDED {
class Operand {
public:
// immediate
V8_INLINE explicit Operand(int32_t immediate,
@ -425,6 +425,7 @@ class Operand BASE_EMBEDDED {
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
static Operand EmbeddedStringConstant(const StringConstantBase* str);
// Return true if this is a register operand.
bool IsRegister() const {
@ -498,7 +499,7 @@ class Operand BASE_EMBEDDED {
// Class MemOperand represents a memory operand in load and store instructions
class MemOperand BASE_EMBEDDED {
class MemOperand {
public:
// [rn +/- offset] Offset/NegOffset
// [rn +/- offset]! PreIndex/NegPreIndex
@ -557,7 +558,7 @@ class MemOperand BASE_EMBEDDED {
// Class NeonMemOperand represents a memory operand in load and
// store NEON instructions
class NeonMemOperand BASE_EMBEDDED {
class NeonMemOperand {
public:
// [rn {:align}] Offset
// [rn {:align}]! PostIndex
@ -580,7 +581,7 @@ class NeonMemOperand BASE_EMBEDDED {
// Class NeonListOperand represents a list of NEON registers
class NeonListOperand BASE_EMBEDDED {
class NeonListOperand {
public:
explicit NeonListOperand(DoubleRegister base, int register_count = 1)
: base_(base), register_count_(register_count) {}
@ -1693,7 +1694,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
friend class UseScratchRegisterScope;
};
class EnsureSpace BASE_EMBEDDED {
class EnsureSpace {
public:
V8_INLINE explicit EnsureSpace(Assembler* assembler);
};


@ -131,7 +131,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
IsolateAddressId::kPendingExceptionAddress, isolate())));
}
__ str(r0, MemOperand(scratch));
__ LoadRoot(r0, Heap::kExceptionRootIndex);
__ LoadRoot(r0, RootIndex::kException);
__ b(&exit);
// Invoke: Link this frame into the handler chain.
@ -418,7 +418,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
__ LeaveExitFrame(false, r4, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
__ LoadRoot(r4, RootIndex::kTheHoleValue);
__ Move(r6, ExternalReference::scheduled_exception_address(isolate));
__ ldr(r5, MemOperand(r6));
__ cmp(r4, r5);
@ -469,14 +469,14 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
// new target
__ PushRoot(Heap::kUndefinedValueRootIndex);
__ PushRoot(RootIndex::kUndefinedValue);
// call data
__ push(call_data);
Register scratch0 = call_data;
Register scratch1 = r5;
__ LoadRoot(scratch0, Heap::kUndefinedValueRootIndex);
__ LoadRoot(scratch0, RootIndex::kUndefinedValue);
// return value
__ push(scratch0);
// return value default
@ -549,7 +549,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Push data from AccessorInfo.
__ ldr(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ push(scratch);
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
__ Push(scratch, scratch);
__ Move(scratch, ExternalReference::isolate_address(isolate()));
__ Push(scratch, holder);


@ -9,7 +9,6 @@
#include "src/arm/assembler-arm-inl.h"
#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
namespace v8 {
@ -19,17 +18,17 @@ namespace internal {
#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
MemCopyUint8Function stub) {
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
Register dest = r0;
Register src = r1;
@ -166,11 +165,12 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
__ Ret();
CodeDesc desc;
masm.GetCode(isolate, &desc);
masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
@ -178,16 +178,17 @@ MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
// Convert 8 to 16. The number of character to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
Isolate* isolate, MemCopyUint16Uint8Function stub) {
MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
return stub;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return stub;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
Register dest = r0;
Register src = r1;
@ -256,25 +257,27 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
}
CodeDesc desc;
masm.GetCode(isolate, &desc);
masm.GetCode(nullptr, &desc);
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
return nullptr;
#else
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
size_t allocated = 0;
byte* buffer = AllocatePage(isolate->heap()->GetRandomMmapAddr(), &allocated);
byte* buffer = AllocatePage(page_allocator,
page_allocator->GetRandomMmapAddr(), &allocated);
if (buffer == nullptr) return nullptr;
MacroAssembler masm(isolate, buffer, static_cast<int>(allocated),
CodeObjectRequired::kNo);
MacroAssembler masm(AssemblerOptions{}, buffer, static_cast<int>(allocated));
__ MovFromFloatParameter(d0);
__ vsqrt(d0, d0);
@ -282,12 +285,13 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
__ Ret();
CodeDesc desc;
masm.GetCode(isolate, &desc);
masm.GetCode(nullptr, &desc);
DCHECK(!RelocInfo::RequiresRelocationAfterCodegen(desc));
Assembler::FlushICache(buffer, allocated);
CHECK(SetPermissions(buffer, allocated, PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
CHECK(SetPermissions(page_allocator, buffer, allocated,
PageAllocator::kReadExecute));
return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}


@ -88,9 +88,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// r0 : number of arguments (on the stack, not including receiver)
// r1 : the target to call
// r2 : arguments list (FixedArray)
// r4 : arguments list length (untagged)
Register registers[] = {r1, r0, r2, r4};
// r2 : arguments list (FixedArray)
Register registers[] = {r1, r0, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@ -125,9 +125,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific(
// r0 : number of arguments (on the stack, not including receiver)
// r1 : the target to call
// r3 : the new target
// r2 : arguments list (FixedArray)
// r4 : arguments list length (untagged)
Register registers[] = {r1, r3, r0, r2, r4};
// r2 : arguments list (FixedArray)
Register registers[] = {r1, r3, r0, r4, r2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@ -193,7 +193,7 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r1, // JSFunction
@ -237,10 +237,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
r0, // argument count (not including receiver)
r3, // new target
r4, // address of the first argument
r1, // constructor to call
r3, // new target
r2, // allocation site feedback if available, undefined otherwise
r4 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}


@ -130,7 +130,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
RootIndex::kBuiltinsConstantsTable));
// The ldr call below could end up clobbering ip when the offset does not fit
// into 12 bits (and thus needs to be loaded from the constant pool). In that
@ -147,7 +147,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
reg = r7;
}
LoadRoot(reg, Heap::kBuiltinsConstantsTableRootIndex);
LoadRoot(reg, RootIndex::kBuiltinsConstantsTable);
ldr(destination, MemOperand(reg, offset));
if (could_clobber_ip) {
@ -527,7 +527,7 @@ void MacroAssembler::Store(Register src,
}
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
void TurboAssembler::LoadRoot(Register destination, RootIndex index,
Condition cond) {
ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)), cond);
}
@ -615,8 +615,6 @@ void TurboAssembler::CallRecordWriteStub(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
Register isolate_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
@ -628,7 +626,6 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter);
Pop(object_parameter);
Move(isolate_parameter, ExternalReference::isolate_address(isolate()));
Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@ -1520,7 +1517,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
LoadRoot(r3, Heap::kUndefinedValueRootIndex);
LoadRoot(r3, RootIndex::kUndefinedValue);
}
Label done;
@ -1642,9 +1639,7 @@ void MacroAssembler::CompareInstanceType(Register map,
cmp(type_reg, Operand(type));
}
void MacroAssembler::CompareRoot(Register obj,
Heap::RootListIndex index) {
void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
DCHECK(obj != scratch);
@ -2053,7 +2048,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
if (emit_debug_code()) {
Label done_checking;
AssertNotSmi(object);
CompareRoot(object, Heap::kUndefinedValueRootIndex);
CompareRoot(object, RootIndex::kUndefinedValue);
b(eq, &done_checking);
ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);


@ -71,6 +71,9 @@ enum TargetAddressStorageMode {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
: TurboAssemblerBase(options, buffer, buffer_size) {}
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
@ -481,11 +484,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
}
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index) override {
void LoadRoot(Register destination, RootIndex index) override {
LoadRoot(destination, index, al);
}
void LoadRoot(Register destination, Heap::RootListIndex index,
Condition cond);
void LoadRoot(Register destination, RootIndex index, Condition cond);
// Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label);
@ -566,10 +568,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
: TurboAssembler(options, buffer, size) {}
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
@ -713,8 +719,8 @@ class MacroAssembler : public TurboAssembler {
// Compare the object in a register to a value from the root list.
// Acquires a scratch register.
void CompareRoot(Register obj, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index) {
void CompareRoot(Register obj, RootIndex index);
void PushRoot(RootIndex index) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
LoadRoot(scratch, index);
@ -722,14 +728,13 @@ class MacroAssembler : public TurboAssembler {
}
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
CompareRoot(with, index);
b(eq, if_equal);
}
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(Register with, Heap::RootListIndex index,
Label* if_not_equal) {
void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
CompareRoot(with, index);
b(ne, if_not_equal);
}


@ -3212,15 +3212,15 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
DecodeVCMP(instr);
} else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) {
// vsqrt
lazily_initialize_fast_sqrt(isolate_);
lazily_initialize_fast_sqrt();
if (instr->SzValue() == 0x1) {
double dm_value = get_double_from_d_register(vm).get_scalar();
double dd_value = fast_sqrt(dm_value, isolate_);
double dd_value = fast_sqrt(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
float sm_value = get_float_from_s_register(m).get_scalar();
float sd_value = fast_sqrt(sm_value, isolate_);
float sd_value = fast_sqrt(sm_value);
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
}
@ -5282,10 +5282,10 @@ void Simulator::DecodeSpecialCondition(Instruction* instr) {
src[i] = bit_cast<uint32_t>(result);
}
} else {
lazily_initialize_fast_sqrt(isolate_);
lazily_initialize_fast_sqrt();
for (int i = 0; i < 4; i++) {
float radicand = bit_cast<float>(src[i]);
float result = 1.0f / fast_sqrt(radicand, isolate_);
float result = 1.0f / fast_sqrt(radicand);
result = canonicalizeNaN(result);
src[i] = bit_cast<uint32_t>(result);
}


@ -341,7 +341,9 @@ Immediate Operand::immediate_for_heap_object_request() const {
DCHECK((heap_object_request().kind() == HeapObjectRequest::kHeapNumber &&
immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT) ||
(heap_object_request().kind() == HeapObjectRequest::kCodeStub &&
immediate_.rmode() == RelocInfo::CODE_TARGET));
immediate_.rmode() == RelocInfo::CODE_TARGET) ||
(heap_object_request().kind() == HeapObjectRequest::kStringConstant &&
immediate_.rmode() == RelocInfo::EMBEDDED_OBJECT));
return immediate_;
}


@ -36,6 +36,7 @@
#include "src/code-stubs.h"
#include "src/frame-constants.h"
#include "src/register-configuration.h"
#include "src/string-constants.h"
namespace v8 {
namespace internal {
@ -583,6 +584,7 @@ void Assembler::Reset() {
}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
switch (request.kind()) {
@ -601,6 +603,13 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
request.code_stub()->GetCode());
break;
}
case HeapObjectRequest::kStringConstant: {
const StringConstantBase* str = request.string();
CHECK_NOT_NULL(str);
set_target_address_at(pc, 0 /* unused */,
str->AllocateStringConstant(isolate).address());
break;
}
}
}
}
@ -1717,6 +1726,13 @@ Operand Operand::EmbeddedCode(CodeStub* stub) {
return result;
}
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
Operand result(0, RelocInfo::EMBEDDED_OBJECT);
result.heap_object_request_.emplace(str);
DCHECK(result.IsHeapObjectRequest());
return result;
}
void Assembler::ldr(const CPURegister& rt, const Operand& operand) {
if (operand.IsHeapObjectRequest()) {
RequestHeapObject(operand.heap_object_request());
@ -4751,14 +4767,6 @@ void Assembler::GrowBuffer() {
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
ConstantPoolMode constant_pool_mode) {
// Non-relocatable constants should not end up in the literal pool.
DCHECK(!RelocInfo::IsNone(rmode));
if (options().disable_reloc_info_for_patching) return;
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
bool write_reloc_info = true;
if ((rmode == RelocInfo::COMMENT) ||
(rmode == RelocInfo::INTERNAL_REFERENCE) ||
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
@ -4772,23 +4780,22 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else if (constant_pool_mode == NEEDS_POOL_ENTRY) {
write_reloc_info = constpool_.RecordEntry(data, rmode);
bool new_constpool_entry = constpool_.RecordEntry(data, rmode);
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolFor(1);
if (!new_constpool_entry) return;
}
// For modes that cannot use the constant pool, a different sequence of
// instructions will be emitted by this function's caller.
if (write_reloc_info) {
// Don't record external references unless the heap will be serialized.
if (RelocInfo::IsOnlyForSerializer(rmode) &&
!options().record_reloc_info_for_serialization && !emit_debug_code()) {
return;
}
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
}
if (!ShouldRecordRelocInfo(rmode)) return;
// We do not try to reuse pool constants.
RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, nullptr);
DCHECK_GE(buffer_space(), kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
}
void Assembler::near_jump(int offset, RelocInfo::Mode rmode) {


@ -718,6 +718,7 @@ class Operand {
static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
static Operand EmbeddedCode(CodeStub* stub);
static Operand EmbeddedStringConstant(const StringConstantBase* str);
inline bool IsHeapObjectRequest() const;
inline HeapObjectRequest heap_object_request() const;
@ -3624,8 +3625,7 @@ class PatchingAssembler : public Assembler {
void PatchSubSp(uint32_t immediate);
};
class EnsureSpace BASE_EMBEDDED {
class EnsureSpace {
public:
explicit EnsureSpace(Assembler* assembler) {
assembler->CheckBufferSpace();


@ -124,7 +124,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
IsolateAddressId::kPendingExceptionAddress, isolate())));
}
__ Str(code_entry, MemOperand(x10));
__ LoadRoot(x0, Heap::kExceptionRootIndex);
__ LoadRoot(x0, RootIndex::kException);
__ B(&exit);
// Invoke: Link this frame into the handler chain.
@ -434,8 +434,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm,
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
__ Ldr(x5, MemOperand(x5));
__ JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex,
&promote_scheduled_exception);
__ JumpIfNotRoot(x5, RootIndex::kTheHoleValue, &promote_scheduled_exception);
__ DropSlots(stack_space);
__ Ret();
@ -484,7 +483,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(FCA::kHolderIndex == 0);
Register undef = x7;
__ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
__ LoadRoot(undef, RootIndex::kUndefinedValue);
// Push new target, call data.
__ Push(undef, call_data);
@ -562,7 +561,7 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
name));
__ Ldr(data, FieldMemOperand(callback, AccessorInfo::kDataOffset));
__ LoadRoot(undef, Heap::kUndefinedValueRootIndex);
__ LoadRoot(undef, RootIndex::kUndefinedValue);
__ Mov(isolate_address, ExternalReference::isolate_address(isolate()));
__ Ldr(name, FieldMemOperand(callback, AccessorInfo::kNameOffset));


@ -8,7 +8,6 @@
#include "src/arm64/macro-assembler-arm64-inl.h"
#include "src/arm64/simulator-arm64.h"
#include "src/codegen.h"
#include "src/isolate.h"
#include "src/macro-assembler.h"
namespace v8 {
@ -16,9 +15,7 @@ namespace internal {
#define __ ACCESS_MASM(masm)
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
return nullptr;
}
UnaryMathFunction CreateSqrtFunction() { return nullptr; }
#undef __


@ -291,10 +291,8 @@ M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
#define DECLARE_INSTRUCTION_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1) \
DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, unused_1, unused_2)
#define NOTHING(A, B)
INSTRUCTION_FIELDS_LIST(DECLARE_INSTRUCTION_FIELDS_OFFSETS)
SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
#undef NOTHING
#undef DECLARE_FIELDS_OFFSETS
#undef DECLARE_INSTRUCTION_FIELDS_OFFSETS


@ -89,9 +89,9 @@ void CallVarargsDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x0 : number of arguments (on the stack, not including receiver)
// x1 : the target to call
// x2 : arguments list (FixedArray)
// x4 : arguments list length (untagged)
Register registers[] = {x1, x0, x2, x4};
// x2 : arguments list (FixedArray)
Register registers[] = {x1, x0, x4, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@ -126,9 +126,9 @@ void ConstructVarargsDescriptor::InitializePlatformSpecific(
// x0 : number of arguments (on the stack, not including receiver)
// x1 : the target to call
// x3 : the new target
// x2 : arguments list (FixedArray)
// x4 : arguments list length (untagged)
Register registers[] = {x1, x3, x0, x2, x4};
// x2 : arguments list (FixedArray)
Register registers[] = {x1, x3, x0, x4, x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
@ -198,7 +198,7 @@ void BinaryOpDescriptor::InitializePlatformSpecific(
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArgumentAdaptorDescriptor::InitializePlatformSpecific(
void ArgumentsAdaptorDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x1, // JSFunction
@ -242,10 +242,10 @@ void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {
x0, // argument count (not including receiver)
x3, // new target
x4, // address of the first argument
x1, // constructor to call
x3, // new target
x2, // allocation site feedback if available, undefined otherwise
x4 // address of the first argument
};
data->InitializePlatformSpecific(arraysize(registers), registers);
}


@ -1516,7 +1516,7 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst,
Fsub(dst, src, fp_zero);
}
void TurboAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
// TODO(jbramley): Most root values are constants, and can be synthesized
// without a load. Refer to the ARM back end for details.
Ldr(destination, MemOperand(kRootRegister, RootRegisterOffset(index)));
@ -1646,7 +1646,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
Register scratch = temps.AcquireX();
Label done_checking;
AssertNotSmi(object);
JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
JumpIfRoot(object, RootIndex::kUndefinedValue, &done_checking);
Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
Assert(eq, AbortReason::kExpectedUndefinedOrCell);
@ -1727,7 +1727,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
}
void MacroAssembler::JumpToInstructionStream(Address entry) {
Mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Ldr(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Br(kOffHeapTrampolineRegister);
}
@ -1806,8 +1806,8 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args,
void TurboAssembler::LoadFromConstantsTable(Register destination,
int constant_index) {
DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(
Heap::kBuiltinsConstantsTableRootIndex));
LoadRoot(destination, Heap::kBuiltinsConstantsTableRootIndex);
RootIndex::kBuiltinsConstantsTable));
LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
Ldr(destination,
FieldMemOperand(destination,
FixedArray::kHeaderSize + constant_index * kPointerSize));
@ -1905,7 +1905,7 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(scratch, cond);
return;
}
@ -1963,7 +1963,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode) {
Register scratch = temps.AcquireX();
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
Mov(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(scratch);
return;
}
@ -2225,7 +2225,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
LoadRoot(x3, Heap::kUndefinedValueRootIndex);
LoadRoot(x3, RootIndex::kUndefinedValue);
}
Label done;
@ -2597,8 +2597,7 @@ void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
DecodeField<Map::ElementsKindBits>(result);
}
void MacroAssembler::CompareRoot(const Register& obj,
Heap::RootListIndex index) {
void MacroAssembler::CompareRoot(const Register& obj, RootIndex index) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
DCHECK(!AreAliased(obj, temp));
@ -2606,17 +2605,13 @@ void MacroAssembler::CompareRoot(const Register& obj,
Cmp(obj, temp);
}
void MacroAssembler::JumpIfRoot(const Register& obj,
Heap::RootListIndex index,
void MacroAssembler::JumpIfRoot(const Register& obj, RootIndex index,
Label* if_equal) {
CompareRoot(obj, index);
B(eq, if_equal);
}
void MacroAssembler::JumpIfNotRoot(const Register& obj,
Heap::RootListIndex index,
void MacroAssembler::JumpIfNotRoot(const Register& obj, RootIndex index,
Label* if_not_equal) {
CompareRoot(obj, index);
B(ne, if_not_equal);
@ -2823,8 +2818,6 @@ void TurboAssembler::CallRecordWriteStub(
RecordWriteDescriptor::kObject));
Register slot_parameter(
callable.descriptor().GetRegisterParameter(RecordWriteDescriptor::kSlot));
Register isolate_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kIsolate));
Register remembered_set_parameter(callable.descriptor().GetRegisterParameter(
RecordWriteDescriptor::kRememberedSet));
Register fp_mode_parameter(callable.descriptor().GetRegisterParameter(
@ -2834,7 +2827,6 @@ void TurboAssembler::CallRecordWriteStub(
Pop(slot_parameter, object_parameter);
Mov(isolate_parameter, ExternalReference::isolate_address(isolate()));
Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
Mov(fp_mode_parameter, Smi::FromEnum(fp_mode));
Call(callable.code(), RelocInfo::CODE_TARGET);
@ -2915,8 +2907,7 @@ void TurboAssembler::AssertUnreachable(AbortReason reason) {
if (emit_debug_code()) Abort(reason);
}
void MacroAssembler::AssertRegisterIsRoot(Register reg,
Heap::RootListIndex index,
void MacroAssembler::AssertRegisterIsRoot(Register reg, RootIndex index,
AbortReason reason) {
if (emit_debug_code()) {
CompareRoot(reg, index);


@ -180,6 +180,9 @@ enum PreShiftImmMode {
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
public:
TurboAssembler(const AssemblerOptions& options, void* buffer, int buffer_size)
: TurboAssemblerBase(options, buffer, buffer_size) {}
TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int buffer_size,
CodeObjectRequired create_code_object)
@ -1126,7 +1129,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
#undef DECLARE_FUNCTION
// Load an object from the root table.
void LoadRoot(Register destination, Heap::RootListIndex index) override;
void LoadRoot(Register destination, RootIndex index) override;
inline void Ret(const Register& xn = lr);
@ -1262,10 +1265,14 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
class MacroAssembler : public TurboAssembler {
public:
MacroAssembler(const AssemblerOptions& options, void* buffer, int size)
: TurboAssembler(options, buffer, size) {}
MacroAssembler(Isolate* isolate, void* buffer, int size,
CodeObjectRequired create_code_object)
: MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
size, create_code_object) {}
MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
void* buffer, int size, CodeObjectRequired create_code_object);
@ -1821,17 +1828,13 @@ class MacroAssembler : public TurboAssembler {
void LoadElementsKindFromMap(Register result, Register map);
// Compare the object in a register to a value from the root list.
void CompareRoot(const Register& obj, Heap::RootListIndex index);
void CompareRoot(const Register& obj, RootIndex index);
// Compare the object in a register to a value and jump if they are equal.
void JumpIfRoot(const Register& obj,
Heap::RootListIndex index,
Label* if_equal);
void JumpIfRoot(const Register& obj, RootIndex index, Label* if_equal);
// Compare the object in a register to a value and jump if they are not equal.
void JumpIfNotRoot(const Register& obj,
Heap::RootListIndex index,
Label* if_not_equal);
void JumpIfNotRoot(const Register& obj, RootIndex index, Label* if_not_equal);
// Compare the contents of a register with an operand, and branch to true,
// false or fall through, depending on condition.
@ -1944,7 +1947,7 @@ class MacroAssembler : public TurboAssembler {
// Debugging.
void AssertRegisterIsRoot(
Register reg, Heap::RootListIndex index,
Register reg, RootIndex index,
AbortReason reason = AbortReason::kRegisterDidNotMatchExpectedRoot);
// Abort if the specified register contains the invalid color bit pattern.
@ -2025,7 +2028,7 @@ class MacroAssembler : public TurboAssembler {
// instructions. This scope prevents the MacroAssembler from being called and
// literal pools from being emitted. It also asserts the number of instructions
// emitted is what you specified when creating the scope.
class InstructionAccurateScope BASE_EMBEDDED {
class InstructionAccurateScope {
public:
explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
: tasm_(tasm)

View File

@ -391,7 +391,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
return MaybeHandle<Object>();
}
memory->set_is_growable(false);
size_t size = NumberToSize(memory->byte_length());
size_t size = memory->byte_length();
// Check the asm.js heap size against the valid limits.
if (!IsValidAsmjsMemorySize(size)) {
ReportInstantiationFailure(script, position, "Invalid heap size");

View File

@ -44,10 +44,24 @@
#include "src/simulator.h" // For flushing instruction cache.
#include "src/snapshot/serializer-common.h"
#include "src/snapshot/snapshot.h"
#include "src/string-constants.h"
namespace v8 {
namespace internal {
AssemblerOptions AssemblerOptions::EnableV8AgnosticCode() const {
AssemblerOptions options = *this;
options.v8_agnostic_code = true;
options.record_reloc_info_for_serialization = false;
options.enable_root_array_delta_access = false;
// Inherit |enable_simulator_code| value.
options.isolate_independent_code = false;
options.inline_offheap_trampolines = false;
// Inherit |code_range_start| value.
// Inherit |use_pc_relative_calls_and_jumps| value.
return options;
}
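EnableV8AgnosticCode() above derives a restricted copy of the current options instead of mutating them in place. A minimal standalone sketch of that copy-then-restrict pattern, with illustrative field names rather than the full V8 struct:

#include <cassert>

struct Options {
  bool v8_agnostic_code = false;
  bool record_reloc_info_for_serialization = true;
  bool enable_simulator_code = false;  // inherited unchanged

  Options EnableAgnosticCode() const {
    Options options = *this;  // start from the current state
    options.v8_agnostic_code = true;
    options.record_reloc_info_for_serialization = false;
    return options;
  }
};

int main() {
  Options base;
  base.enable_simulator_code = true;
  Options agnostic = base.EnableAgnosticCode();
  assert(agnostic.v8_agnostic_code);
  assert(!agnostic.record_reloc_info_for_serialization);
  assert(agnostic.enable_simulator_code);  // inherited
  return 0;
}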
AssemblerOptions AssemblerOptions::Default(
Isolate* isolate, bool explicitly_support_serialization) {
AssemblerOptions options;
@ -61,9 +75,12 @@ AssemblerOptions AssemblerOptions::Default(
options.enable_simulator_code = !serializer;
#endif
options.inline_offheap_trampolines = !serializer;
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
options.code_range_start =
isolate->heap()->memory_allocator()->code_range()->start();
const base::AddressRegion& code_range =
isolate->heap()->memory_allocator()->code_range();
DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty());
options.code_range_start = code_range.begin();
#endif
return options;
}
@ -355,6 +372,13 @@ HeapObjectRequest::HeapObjectRequest(CodeStub* code_stub, int offset)
DCHECK_NOT_NULL(value_.code_stub);
}
HeapObjectRequest::HeapObjectRequest(const StringConstantBase* string,
int offset)
: kind_(kStringConstant), offset_(offset) {
value_.string = string;
DCHECK_NOT_NULL(value_.string);
}
// Platform-specific but identical code for all platforms.
void Assembler::RecordDeoptReason(DeoptimizeReason reason,
@ -381,11 +405,13 @@ void Assembler::DataAlign(int m) {
}
void AssemblerBase::RequestHeapObject(HeapObjectRequest request) {
DCHECK(!options().v8_agnostic_code);
request.set_offset(pc_offset());
heap_object_requests_.push_front(request);
}
int AssemblerBase::AddCodeTarget(Handle<Code> target) {
DCHECK(!options().v8_agnostic_code);
int current = static_cast<int>(code_targets_.size());
if (current > 0 && !target.is_null() &&
code_targets_.back().address() == target.address()) {
@ -398,6 +424,7 @@ int AssemblerBase::AddCodeTarget(Handle<Code> target) {
}
Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
DCHECK(!options().v8_agnostic_code);
DCHECK_LE(0, code_target_index);
DCHECK_LT(code_target_index, code_targets_.size());
return code_targets_[code_target_index];
@ -405,6 +432,7 @@ Handle<Code> AssemblerBase::GetCodeTarget(intptr_t code_target_index) const {
void AssemblerBase::UpdateCodeTarget(intptr_t code_target_index,
Handle<Code> code) {
DCHECK(!options().v8_agnostic_code);
DCHECK_LE(0, code_target_index);
DCHECK_LT(code_target_index, code_targets_.size());
code_targets_[code_target_index] = code;

View File

@ -67,6 +67,7 @@ class Isolate;
class SCTableReference;
class SourcePosition;
class StatsCounter;
class StringConstantBase;
// -----------------------------------------------------------------------------
// Optimization for far-jmp like instructions that can be replaced by shorter.
@ -97,8 +98,9 @@ class HeapObjectRequest {
public:
explicit HeapObjectRequest(double heap_number, int offset = -1);
explicit HeapObjectRequest(CodeStub* code_stub, int offset = -1);
explicit HeapObjectRequest(const StringConstantBase* string, int offset = -1);
enum Kind { kHeapNumber, kCodeStub };
enum Kind { kHeapNumber, kCodeStub, kStringConstant };
Kind kind() const { return kind_; }
double heap_number() const {
@ -111,6 +113,11 @@ class HeapObjectRequest {
return value_.code_stub;
}
const StringConstantBase* string() const {
DCHECK_EQ(kind(), kStringConstant);
return value_.string;
}
// The code buffer offset at the time of the request.
int offset() const {
DCHECK_GE(offset_, 0);
@ -128,6 +135,7 @@ class HeapObjectRequest {
union {
double heap_number;
CodeStub* code_stub;
const StringConstantBase* string;
} value_;
int offset_;
@ -139,6 +147,9 @@ class HeapObjectRequest {
enum class CodeObjectRequired { kNo, kYes };
struct V8_EXPORT_PRIVATE AssemblerOptions {
// Prohibits using any V8-specific features of the assembler (isolates,
// heap objects, external references, etc.).
bool v8_agnostic_code = false;
// Recording reloc info for external references and off-heap targets is
// needed whenever code is serialized, e.g. into the snapshot or as a WASM
// module. This flag allows this reloc info to be disabled for code that
@ -168,6 +179,9 @@ struct V8_EXPORT_PRIVATE AssemblerOptions {
// the instruction immediates.
bool use_pc_relative_calls_and_jumps = false;
// Constructs V8-agnostic set of options from current state.
AssemblerOptions EnableV8AgnosticCode() const;
static AssemblerOptions Default(
Isolate* isolate, bool explicitly_support_serialization = false);
};
@ -268,13 +282,23 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
}
}
// {RequestHeapObject} records the need for a future heap number allocation or
// code stub generation. After code assembly, each platform's
// {Assembler::AllocateAndInstallRequestedHeapObjects} will allocate these
// objects and place them where they are expected (determined by the pc offset
// associated with each request).
// {RequestHeapObject} records the need for a future heap number allocation,
// code stub generation or string allocation. After code assembly, each
// platform's {Assembler::AllocateAndInstallRequestedHeapObjects} will
// allocate these objects and place them where they are expected (determined
// by the pc offset associated with each request).
void RequestHeapObject(HeapObjectRequest request);
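The comment above describes a two-phase scheme: while code is emitted, only a pc offset and a request are recorded, and a fix-up pass installs the allocated objects afterwards. A self-contained toy version of the scheme, with hypothetical names and a fake handle standing in for a real heap object:

#include <cassert>
#include <cstdint>
#include <vector>

struct Request {
  double heap_number;  // what will be allocated after assembly
  int offset;          // code-buffer position that must be patched
};

int main() {
  std::vector<uint64_t> code(4, 0);  // stand-in for the code buffer
  std::vector<Request> requests;
  requests.push_back({1.5, 2});      // "RequestHeapObject" at pc offset 2

  // Assembly is done; allocate the requested objects and install them at
  // the recorded offsets (here a fake handle value stands in for one).
  for (const Request& r : requests) {
    uint64_t fake_handle = 0x1000;
    (void)r.heap_number;             // a real allocator would use this
    code[r.offset] = fake_handle;
  }
  assert(code[2] == 0x1000);
  return 0;
}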
bool ShouldRecordRelocInfo(RelocInfo::Mode rmode) const {
DCHECK(!RelocInfo::IsNone(rmode));
if (options().disable_reloc_info_for_patching) return false;
if (RelocInfo::IsOnlyForSerializer(rmode) &&
!options().record_reloc_info_for_serialization && !emit_debug_code()) {
return false;
}
return true;
}
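ShouldRecordRelocInfo() condenses the recording policy into two early-outs; restated as a standalone predicate over plain flags (parameter names are descriptive, not V8's):

#include <cassert>

bool ShouldRecord(bool disable_for_patching, bool only_for_serializer,
                  bool record_for_serialization, bool emit_debug_code) {
  if (disable_for_patching) return false;
  if (only_for_serializer && !record_for_serialization && !emit_debug_code) {
    return false;  // serializer-only info is useless outside serialization
  }
  return true;
}

int main() {
  // Serializer-only reloc info is skipped in a normal non-debug build...
  assert(!ShouldRecord(false, true, false, false));
  // ...but kept when building a snapshot (or when debug code is emitted).
  assert(ShouldRecord(false, true, true, false));
  return 0;
}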
private:
// Before we copy code into the code space, we sometimes cannot encode
// call/jump code targets as we normally would, as the difference between the
@ -301,7 +325,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
};
// Avoids emitting debug code during the lifetime of this scope object.
class DontEmitDebugCodeScope BASE_EMBEDDED {
class DontEmitDebugCodeScope {
public:
explicit DontEmitDebugCodeScope(AssemblerBase* assembler)
: assembler_(assembler), old_value_(assembler->emit_debug_code()) {
@ -332,7 +356,7 @@ class PredictableCodeSizeScope {
// Enable a specified feature within a scope.
class CpuFeatureScope BASE_EMBEDDED {
class CpuFeatureScope {
public:
enum CheckPolicy {
kCheckSupported,
@ -350,12 +374,12 @@ class CpuFeatureScope BASE_EMBEDDED {
#else
CpuFeatureScope(AssemblerBase* assembler, CpuFeature f,
CheckPolicy check = kCheckSupported) {}
// Define a destructor to avoid unused variable warnings.
~CpuFeatureScope() {}
~CpuFeatureScope() { // NOLINT (modernize-use-equals-default)
// Define a destructor to avoid unused variable warnings.
}
#endif
};
// CpuFeatures keeps track of which features are supported by the target CPU.
// Supported features must be enabled by a CpuFeatureScope before use.
// Example:
@ -420,7 +444,7 @@ class CpuFeatures : public AllStatic {
// Utility functions
// Computes pow(x, y) with the special cases in the spec for Math.pow.
double power_helper(Isolate* isolate, double x, double y);
double power_helper(double x, double y);
double power_double_int(double x, int y);
double power_double_double(double x, double y);
@ -430,7 +454,7 @@ double power_double_double(double x, double y);
class ConstantPoolEntry {
public:
ConstantPoolEntry() {}
ConstantPoolEntry() = default;
ConstantPoolEntry(int position, intptr_t value, bool sharing_ok,
RelocInfo::Mode rmode = RelocInfo::NONE)
: position_(position),
@ -447,7 +471,7 @@ class ConstantPoolEntry {
int position() const { return position_; }
bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
bool is_merged() const { return merged_index_ >= 0; }
int merged_index(void) const {
int merged_index() const {
DCHECK(is_merged());
return merged_index_;
}
@ -456,7 +480,7 @@ class ConstantPoolEntry {
merged_index_ = index;
DCHECK(is_merged());
}
int offset(void) const {
int offset() const {
DCHECK_GE(merged_index_, 0);
return merged_index_;
}
@ -493,7 +517,7 @@ class ConstantPoolEntry {
// -----------------------------------------------------------------------------
// Embedded constant pool support
class ConstantPoolBuilder BASE_EMBEDDED {
class ConstantPoolBuilder {
public:
ConstantPoolBuilder(int ptr_reach_bits, int double_reach_bits);

View File

@ -77,7 +77,9 @@ class PerThreadAssertScopeDebugOnly : public
#else
class PerThreadAssertScopeDebugOnly {
public:
PerThreadAssertScopeDebugOnly() { }
PerThreadAssertScopeDebugOnly() { // NOLINT (modernize-use-equals-default)
// Define a constructor to avoid unused variable warnings.
}
void Release() {}
#endif
};

View File

@ -14,7 +14,7 @@ AstFunctionLiteralIdReindexer::AstFunctionLiteralIdReindexer(size_t stack_limit,
int delta)
: AstTraversalVisitor(stack_limit), delta_(delta) {}
AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() {}
AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() = default;
void AstFunctionLiteralIdReindexer::Reindex(Expression* pattern) {
Visit(pattern);

View File

@ -21,8 +21,9 @@ struct SourceRange {
static SourceRange OpenEnded(int32_t start) {
return SourceRange(start, kNoSourcePosition);
}
static SourceRange ContinuationOf(const SourceRange& that) {
return that.IsEmpty() ? Empty() : OpenEnded(that.end);
static SourceRange ContinuationOf(const SourceRange& that,
int end = kNoSourcePosition) {
return that.IsEmpty() ? Empty() : SourceRange(that.end, end);
}
int32_t start, end;
};
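The new optional end parameter lets a continuation range be closed instead of always being open-ended. A self-contained mirror of the struct showing both uses (IsEmpty() is assumed to test start, as in the surrounding code):

#include <cassert>
#include <cstdint>

constexpr int32_t kNoSourcePosition = -1;

struct SourceRange {
  int32_t start = kNoSourcePosition;
  int32_t end = kNoSourcePosition;
  bool IsEmpty() const { return start == kNoSourcePosition; }
  static SourceRange Empty() { return SourceRange(); }
  static SourceRange ContinuationOf(const SourceRange& that,
                                    int32_t end = kNoSourcePosition) {
    return that.IsEmpty() ? Empty() : SourceRange{that.end, end};
  }
};

int main() {
  SourceRange body{10, 20};
  SourceRange open = SourceRange::ContinuationOf(body);        // open-ended
  SourceRange closed = SourceRange::ContinuationOf(body, 25);  // now closable
  assert(open.start == 20 && open.end == kNoSourcePosition);
  assert(closed.start == 20 && closed.end == 25);
  return 0;
}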
@ -56,7 +57,7 @@ enum class SourceRangeKind {
class AstNodeSourceRanges : public ZoneObject {
public:
virtual ~AstNodeSourceRanges() {}
virtual ~AstNodeSourceRanges() = default;
virtual SourceRange GetRange(SourceRangeKind kind) = 0;
};
@ -65,7 +66,7 @@ class BinaryOperationSourceRanges final : public AstNodeSourceRanges {
explicit BinaryOperationSourceRanges(const SourceRange& right_range)
: right_range_(right_range) {}
SourceRange GetRange(SourceRangeKind kind) {
SourceRange GetRange(SourceRangeKind kind) override {
DCHECK_EQ(kind, SourceRangeKind::kRight);
return right_range_;
}
@ -79,7 +80,7 @@ class ContinuationSourceRanges : public AstNodeSourceRanges {
explicit ContinuationSourceRanges(int32_t continuation_position)
: continuation_position_(continuation_position) {}
SourceRange GetRange(SourceRangeKind kind) {
SourceRange GetRange(SourceRangeKind kind) override {
DCHECK_EQ(kind, SourceRangeKind::kContinuation);
return SourceRange::OpenEnded(continuation_position_);
}
@ -99,7 +100,7 @@ class CaseClauseSourceRanges final : public AstNodeSourceRanges {
explicit CaseClauseSourceRanges(const SourceRange& body_range)
: body_range_(body_range) {}
SourceRange GetRange(SourceRangeKind kind) {
SourceRange GetRange(SourceRangeKind kind) override {
DCHECK_EQ(kind, SourceRangeKind::kBody);
return body_range_;
}
@ -114,7 +115,7 @@ class ConditionalSourceRanges final : public AstNodeSourceRanges {
const SourceRange& else_range)
: then_range_(then_range), else_range_(else_range) {}
SourceRange GetRange(SourceRangeKind kind) {
SourceRange GetRange(SourceRangeKind kind) override {
switch (kind) {
case SourceRangeKind::kThen:
return then_range_;
@ -136,7 +137,7 @@ class IfStatementSourceRanges final : public AstNodeSourceRanges {
const SourceRange& else_range)
: then_range_(then_range), else_range_(else_range) {}
SourceRange GetRange(SourceRangeKind kind) {
SourceRange GetRange(SourceRangeKind kind) override {
switch (kind) {
case SourceRangeKind::kElse:
return else_range_;
@ -162,7 +163,7 @@ class IterationStatementSourceRanges final : public AstNodeSourceRanges {
explicit IterationStatementSourceRanges(const SourceRange& body_range)
: body_range_(body_range) {}
SourceRange GetRange(SourceRangeKind kind) {
SourceRange GetRange(SourceRangeKind kind) override {
switch (kind) {
case SourceRangeKind::kBody:
return body_range_;
@ -198,7 +199,7 @@ class NaryOperationSourceRanges final : public AstNodeSourceRanges {
void AddRange(const SourceRange& range) { ranges_.push_back(range); }
size_t RangeCount() const { return ranges_.size(); }
SourceRange GetRange(SourceRangeKind kind) { UNREACHABLE(); }
SourceRange GetRange(SourceRangeKind kind) override { UNREACHABLE(); }
private:
ZoneVector<SourceRange> ranges_;
@ -227,7 +228,7 @@ class TryCatchStatementSourceRanges final : public AstNodeSourceRanges {
explicit TryCatchStatementSourceRanges(const SourceRange& catch_range)
: catch_range_(catch_range) {}
SourceRange GetRange(SourceRangeKind kind) {
SourceRange GetRange(SourceRangeKind kind) override {
switch (kind) {
case SourceRangeKind::kCatch:
return catch_range_;
@ -247,7 +248,7 @@ class TryFinallyStatementSourceRanges final : public AstNodeSourceRanges {
explicit TryFinallyStatementSourceRanges(const SourceRange& finally_range)
: finally_range_(finally_range) {}
SourceRange GetRange(SourceRangeKind kind) {
SourceRange GetRange(SourceRangeKind kind) override {
switch (kind) {
case SourceRangeKind::kFinally:
return finally_range_;

View File

@ -242,6 +242,17 @@ const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
return result;
}
const AstRawString* AstValueFactory::CloneFromOtherFactory(
const AstRawString* raw_string) {
const AstRawString* result = GetString(
raw_string->hash_field(), raw_string->is_one_byte(),
Vector<const byte>(raw_string->raw_data(), raw_string->byte_length()));
// Check we weren't trying to clone a string that was already in this
// ast-value-factory.
DCHECK_NE(result, raw_string);
return result;
}
AstConsString* AstValueFactory::NewConsString() {
AstConsString* new_string = new (zone_) AstConsString;
DCHECK_NOT_NULL(new_string);

View File

@ -194,48 +194,49 @@ class AstBigInt {
};
// For generating constants.
#define AST_STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
F(async, "async") \
F(await, "await") \
F(bigint, "bigint") \
F(boolean, "boolean") \
F(constructor, "constructor") \
F(default, "default") \
F(done, "done") \
F(dot, ".") \
F(dot_for, ".for") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
F(dot_result, ".result") \
F(dot_switch_tag, ".switch_tag") \
F(dot_catch, ".catch") \
F(empty, "") \
F(eval, "eval") \
F(function, "function") \
F(get_space, "get ") \
F(length, "length") \
F(let, "let") \
F(name, "name") \
F(native, "native") \
F(new_target, ".new.target") \
F(next, "next") \
F(number, "number") \
F(object, "object") \
F(proto, "__proto__") \
F(prototype, "prototype") \
F(return, "return") \
F(set_space, "set ") \
F(star_default_star, "*default*") \
F(string, "string") \
F(symbol, "symbol") \
F(this, "this") \
F(this_function, ".this_function") \
F(throw, "throw") \
F(undefined, "undefined") \
F(use_asm, "use asm") \
F(use_strict, "use strict") \
#define AST_STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \
F(async, "async") \
F(await, "await") \
F(bigint, "bigint") \
F(boolean, "boolean") \
F(constructor, "constructor") \
F(default, "default") \
F(done, "done") \
F(dot, ".") \
F(dot_for, ".for") \
F(dot_generator_object, ".generator_object") \
F(dot_iterator, ".iterator") \
F(dot_promise, ".promise") \
F(dot_result, ".result") \
F(dot_switch_tag, ".switch_tag") \
F(dot_catch, ".catch") \
F(empty, "") \
F(eval, "eval") \
F(function, "function") \
F(get_space, "get ") \
F(length, "length") \
F(let, "let") \
F(name, "name") \
F(native, "native") \
F(new_target, ".new.target") \
F(next, "next") \
F(number, "number") \
F(object, "object") \
F(proto, "__proto__") \
F(prototype, "prototype") \
F(return, "return") \
F(set_space, "set ") \
F(star_default_star, "*default*") \
F(string, "string") \
F(symbol, "symbol") \
F(this, "this") \
F(this_function, ".this_function") \
F(throw, "throw") \
F(undefined, "undefined") \
F(use_asm, "use asm") \
F(use_strict, "use strict") \
F(value, "value")
class AstStringConstants final {
@ -297,10 +298,15 @@ class AstValueFactory {
return GetTwoByteStringInternal(literal);
}
const AstRawString* GetString(Handle<String> literal);
// Clones an AstRawString from another ast value factory, adding it to this
// factory and returning the clone.
const AstRawString* CloneFromOtherFactory(const AstRawString* raw_string);
V8_EXPORT_PRIVATE AstConsString* NewConsString();
AstConsString* NewConsString(const AstRawString* str);
AstConsString* NewConsString(const AstRawString* str1,
const AstRawString* str2);
V8_EXPORT_PRIVATE AstConsString* NewConsString(const AstRawString* str);
V8_EXPORT_PRIVATE AstConsString* NewConsString(const AstRawString* str1,
const AstRawString* str2);
V8_EXPORT_PRIVATE void Internalize(Isolate* isolate);
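AST_STRING_CONSTANTS above is an X-macro: the single list is expanded with different definitions of F to generate matching declarations and string data in lockstep. A minimal standalone sketch of the technique, with demo names rather than V8's expansion sites:

#include <cassert>
#include <cstring>

#define DEMO_STRING_CONSTANTS(F) \
  F(async, "async")              \
  F(await, "await")              \
  F(empty, "")

enum class DemoString {
#define ENUM_ENTRY(name, str) k_##name,
  DEMO_STRING_CONSTANTS(ENUM_ENTRY)
#undef ENUM_ENTRY
};

const char* const kDemoStrings[] = {
#define TABLE_ENTRY(name, str) str,
    DEMO_STRING_CONSTANTS(TABLE_ENTRY)
#undef TABLE_ENTRY
};

int main() {
  const char* s = kDemoStrings[static_cast<int>(DemoString::k_await)];
  assert(std::strcmp(s, "await") == 0);  // enum and table stay in sync
  return 0;
}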

View File

@ -551,12 +551,6 @@ bool ObjectLiteral::IsFastCloningSupported() const {
ConstructorBuiltins::kMaximumClonedShallowObjectProperties;
}
bool ArrayLiteral::is_empty() const {
DCHECK(is_initialized());
return values()->is_empty() && (boilerplate_description().is_null() ||
boilerplate_description()->is_empty());
}
int ArrayLiteral::InitDepthAndFlags() {
if (is_initialized()) return depth();

38
deps/v8/src/ast/ast.h vendored
View File

@ -383,7 +383,7 @@ class DoExpression final : public Expression {
class Declaration : public AstNode {
public:
typedef ThreadedList<Declaration> List;
typedef base::ThreadedList<Declaration> List;
VariableProxy* proxy() const { return proxy_; }
@ -397,6 +397,7 @@ class Declaration : public AstNode {
Declaration** next() { return &next_; }
Declaration* next_;
friend List;
friend base::ThreadedListTraits<Declaration>;
};
class VariableDeclaration : public Declaration {
@ -1477,8 +1478,6 @@ class ArrayLiteral final : public AggregateLiteral {
int first_spread_index() const { return first_spread_index_; }
bool is_empty() const;
// Populate the depth field and flags, returns the depth.
int InitDepthAndFlags();
@ -1578,8 +1577,15 @@ class VariableProxy final : public Expression {
// Bind this proxy to the variable var.
void BindTo(Variable* var);
void set_next_unresolved(VariableProxy* next) { next_unresolved_ = next; }
VariableProxy* next_unresolved() { return next_unresolved_; }
V8_INLINE VariableProxy* next_unresolved() { return next_unresolved_; }
// Provides an access type for the ThreadedList used by the PreParsers
// expressions, lists, and formal parameters.
struct PreParserNext {
static VariableProxy** next(VariableProxy* t) {
return t->pre_parser_expr_next();
}
};
private:
friend class AstNodeFactory;
@ -1590,7 +1596,8 @@ class VariableProxy final : public Expression {
int start_position)
: Expression(start_position, kVariableProxy),
raw_name_(name),
next_unresolved_(nullptr) {
next_unresolved_(nullptr),
pre_parser_expr_next_(nullptr) {
bit_field_ |= IsThisField::encode(variable_kind == THIS_VARIABLE) |
IsAssignedField::encode(false) |
IsResolvedField::encode(false) |
@ -1613,9 +1620,15 @@ class VariableProxy final : public Expression {
const AstRawString* raw_name_; // if !is_resolved_
Variable* var_; // if is_resolved_
};
VariableProxy* next_unresolved_;
};
V8_INLINE VariableProxy** next() { return &next_unresolved_; }
VariableProxy* next_unresolved_;
VariableProxy** pre_parser_expr_next() { return &pre_parser_expr_next_; }
VariableProxy* pre_parser_expr_next_;
friend base::ThreadedListTraits<VariableProxy>;
};
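Several hunks in this commit replace the hand-rolled next_unresolved_ chaining with base::ThreadedList, an intrusive singly-linked list whose traits type (or a custom accessor such as PreParserNext above) tells it where each node's next pointer lives. A compact standalone sketch of the idea, deliberately simpler than V8's implementation:

#include <cassert>

template <typename T>
struct ThreadedListTraits {
  static T** next(T* t) { return t->next(); }
};

template <typename T, typename Traits = ThreadedListTraits<T>>
class ThreadedList {
 public:
  void AddFront(T* node) {
    *Traits::next(node) = head_;
    head_ = node;
  }
  T* first() const { return head_; }
  bool is_empty() const { return head_ == nullptr; }
  void Clear() { head_ = nullptr; }

 private:
  T* head_ = nullptr;
};

struct Node {
  int value = 0;
  Node* next_ = nullptr;
  Node** next() { return &next_; }  // hook used by the default traits
};

int main() {
  Node a{1}, b{2};
  ThreadedList<Node> list;
  list.AddFront(&a);
  list.AddFront(&b);  // list is now b -> a
  assert(list.first() == &b);
  assert(*ThreadedListTraits<Node>::next(&b) == &a);
  return 0;
}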
// Left-hand side can only be a property, a global or a (parameter or local)
// slot.
@ -2248,7 +2261,7 @@ class FunctionLiteral final : public Expression {
void mark_as_iife() { bit_field_ = IIFEBit::update(bit_field_, true); }
bool is_iife() const { return IIFEBit::decode(bit_field_); }
bool is_top_level() const {
bool is_toplevel() const {
return function_literal_id() == FunctionLiteral::kIdTypeTopLevel;
}
bool is_wrapped() const { return function_type() == kWrapped; }
@ -2308,7 +2321,7 @@ class FunctionLiteral final : public Expression {
// - (function() { ... })();
// - var x = function() { ... }();
bool ShouldEagerCompile() const;
void SetShouldEagerCompile();
V8_EXPORT_PRIVATE void SetShouldEagerCompile();
FunctionType function_type() const {
return FunctionTypeBits::decode(bit_field_);
@ -2736,7 +2749,7 @@ class TemplateLiteral final : public Expression {
// class SpecificVisitor : public AstVisitor<SpecificVisitor> { ... }
template <class Subclass>
class AstVisitor BASE_EMBEDDED {
class AstVisitor {
public:
void Visit(AstNode* node) { impl()->Visit(node); }
@ -2823,7 +2836,7 @@ class AstVisitor BASE_EMBEDDED {
// ----------------------------------------------------------------------------
// AstNode factory
class AstNodeFactory final BASE_EMBEDDED {
class AstNodeFactory final {
public:
AstNodeFactory(AstValueFactory* ast_value_factory, Zone* zone)
: zone_(zone), ast_value_factory_(ast_value_factory) {}
@ -3330,7 +3343,6 @@ class AstNodeFactory final BASE_EMBEDDED {
}
Zone* zone() const { return zone_; }
void set_zone(Zone* zone) { zone_ = zone; }
private:
// This zone may be deallocated upon returning from parsing a function body

View File

@ -31,7 +31,7 @@ CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
InitializeAstVisitor(isolate);
}
CallPrinter::~CallPrinter() {}
CallPrinter::~CallPrinter() = default;
CallPrinter::ErrorHint CallPrinter::GetErrorHint() const {
if (is_call_error_) {
@ -666,7 +666,7 @@ void AstPrinter::PrintLiteral(const AstConsString* value, bool quote) {
//-----------------------------------------------------------------------------
class IndentedScope BASE_EMBEDDED {
class IndentedScope {
public:
IndentedScope(AstPrinter* printer, const char* txt)
: ast_printer_(printer) {

66
deps/v8/src/ast/scopes-inl.h vendored Normal file
View File

@ -0,0 +1,66 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_AST_SCOPES_INL_H_
#define V8_AST_SCOPES_INL_H_
#include "src/ast/scopes.h"
namespace v8 {
namespace internal {
template <typename T>
void Scope::ResolveScopesThenForEachVariable(DeclarationScope* max_outer_scope,
T variable_proxy_stackvisitor,
ParseInfo* info) {
// Module variables must be allocated before variable resolution
// to ensure that UpdateNeedsHoleCheck() can detect import variables.
if (info != nullptr && is_module_scope()) {
AsModuleScope()->AllocateModuleVariables();
}
// Lazy parsed declaration scopes are already partially analyzed. If there are
// unresolved references remaining, they just need to be resolved in outer
// scopes.
Scope* lookup =
is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
? outer_scope()
: this;
for (VariableProxy *proxy = unresolved_list_.first(), *next = nullptr;
proxy != nullptr; proxy = next) {
next = proxy->next_unresolved();
DCHECK(!proxy->is_resolved());
Variable* var =
lookup->LookupRecursive(info, proxy, max_outer_scope->outer_scope());
if (var == nullptr) {
variable_proxy_stackvisitor(proxy);
} else if (var != Scope::kDummyPreParserVariable &&
var != Scope::kDummyPreParserLexicalVariable) {
if (info != nullptr) {
// In this case we need to leave scopes in a way that they can be
// allocated. If we resolved variables from lazy parsed scopes, we need
// to context allocate the var.
ResolveTo(info, proxy, var);
if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
} else {
var->set_is_used();
if (proxy->is_assigned()) var->set_maybe_assigned();
}
}
}
// Clear unresolved_list_ as it's in an inconsistent state.
unresolved_list_.Clear();
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
scope->ResolveScopesThenForEachVariable(max_outer_scope,
variable_proxy_stackvisitor, info);
}
}
} // namespace internal
} // namespace v8
#endif // V8_AST_SCOPES_INL_H_
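ResolveScopesThenForEachVariable() replaces the old FetchFreeVariables() result list with a caller-supplied visitor; later hunks in this commit pass lambdas for it. A standalone sketch of that control-flow shape, using plain standard-library types:

#include <cassert>
#include <string>
#include <vector>

struct Scope {
  std::vector<std::string> unresolved;
  std::vector<Scope*> inner;

  template <typename Visitor>
  void ForEachUnresolved(Visitor visit) {
    for (const std::string& name : unresolved) visit(name);
    unresolved.clear();  // leave the per-scope list in a consistent state
    for (Scope* scope : inner) scope->ForEachUnresolved(visit);
  }
};

int main() {
  Scope inner{{"x"}, {}};
  Scope outer{{"y"}, {&inner}};
  std::vector<std::string> seen;
  outer.ForEachUnresolved(
      [&seen](const std::string& name) { seen.push_back(name); });
  assert((seen == std::vector<std::string>{"y", "x"}));  // outer-in order
  return 0;
}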

View File

@ -8,6 +8,7 @@
#include "src/accessors.h"
#include "src/ast/ast.h"
#include "src/ast/scopes-inl.h"
#include "src/base/optional.h"
#include "src/bootstrapper.h"
#include "src/counters.h"
@ -23,15 +24,11 @@ namespace v8 {
namespace internal {
namespace {
void* kDummyPreParserVariable = reinterpret_cast<void*>(0x1);
void* kDummyPreParserLexicalVariable = reinterpret_cast<void*>(0x2);
bool IsLexical(Variable* variable) {
if (variable == kDummyPreParserLexicalVariable) return true;
if (variable == kDummyPreParserVariable) return false;
if (variable == Scope::kDummyPreParserLexicalVariable) return true;
if (variable == Scope::kDummyPreParserVariable) return false;
return IsLexicalVariableMode(variable->mode());
}
} // namespace
// ----------------------------------------------------------------------------
@ -76,8 +73,9 @@ Variable* VariableMap::DeclareName(Zone* zone, const AstRawString* name,
if (p->value == nullptr) {
// The variable has not been declared yet -> insert it.
DCHECK_EQ(name, p->key);
p->value = mode == VariableMode::kVar ? kDummyPreParserVariable
: kDummyPreParserLexicalVariable;
p->value = mode == VariableMode::kVar
? Scope::kDummyPreParserVariable
: Scope::kDummyPreParserLexicalVariable;
}
return reinterpret_cast<Variable*>(p->value);
}
@ -154,7 +152,7 @@ Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type)
Scope::Snapshot::Snapshot(Scope* scope)
: outer_scope_(scope),
top_inner_scope_(scope->inner_scope_),
top_unresolved_(scope->unresolved_),
top_unresolved_(scope->unresolved_list_.first()),
top_local_(scope->GetClosureScope()->locals_.end()),
top_decl_(scope->GetClosureScope()->decls_.end()),
outer_scope_calls_eval_(scope->scope_calls_eval_) {
@ -310,6 +308,8 @@ void DeclarationScope::SetDefaults() {
has_arguments_parameter_ = false;
scope_uses_super_property_ = false;
has_rest_ = false;
has_promise_ = false;
has_generator_object_ = false;
sloppy_block_function_map_ = nullptr;
receiver_ = nullptr;
new_target_ = nullptr;
@ -319,7 +319,7 @@ void DeclarationScope::SetDefaults() {
should_eager_compile_ = false;
was_lazily_parsed_ = false;
is_skipped_function_ = false;
produced_preparsed_scope_data_ = nullptr;
preparsed_scope_data_builder_ = nullptr;
#ifdef DEBUG
DeclarationScope* outer_declaration_scope =
outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
@ -337,7 +337,7 @@ void Scope::SetDefaults() {
#endif
inner_scope_ = nullptr;
sibling_ = nullptr;
unresolved_ = nullptr;
unresolved_list_.Clear();
start_position_ = kNoSourcePosition;
end_position_ = kNoSourcePosition;
@ -779,6 +779,7 @@ Variable* DeclarationScope::DeclareGeneratorObjectVar(
Variable* result = EnsureRareData()->generator_object =
NewTemporary(name, kNotAssigned);
result->set_is_used();
has_generator_object_ = true;
return result;
}
@ -787,6 +788,7 @@ Variable* DeclarationScope::DeclarePromiseVar(const AstRawString* name) {
DCHECK_NULL(promise_var());
Variable* result = EnsureRareData()->promise = NewTemporary(name);
result->set_is_used();
has_promise_ = true;
return result;
}
@ -834,16 +836,9 @@ Scope* Scope::FinalizeBlockScope() {
}
// Move unresolved variables
if (unresolved_ != nullptr) {
if (outer_scope()->unresolved_ != nullptr) {
VariableProxy* unresolved = unresolved_;
while (unresolved->next_unresolved() != nullptr) {
unresolved = unresolved->next_unresolved();
}
unresolved->set_next_unresolved(outer_scope()->unresolved_);
}
outer_scope()->unresolved_ = unresolved_;
unresolved_ = nullptr;
if (!unresolved_list_.is_empty()) {
outer_scope()->unresolved_list_.Prepend(std::move(unresolved_list_));
unresolved_list_.Clear();
}
if (inner_scope_calls_eval_) outer_scope()->inner_scope_calls_eval_ = true;
@ -887,7 +882,7 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
DCHECK_EQ(new_parent->outer_scope_, outer_scope_);
DCHECK_EQ(new_parent, new_parent->GetClosureScope());
DCHECK_NULL(new_parent->inner_scope_);
DCHECK_NULL(new_parent->unresolved_);
DCHECK(new_parent->unresolved_list_.is_empty());
DCHECK(new_parent->locals_.is_empty());
Scope* inner_scope = new_parent->sibling_;
if (inner_scope != top_inner_scope_) {
@ -910,14 +905,21 @@ void Scope::Snapshot::Reparent(DeclarationScope* new_parent) const {
new_parent->sibling_ = top_inner_scope_;
}
if (outer_scope_->unresolved_ != top_unresolved_) {
VariableProxy* last = outer_scope_->unresolved_;
while (last->next_unresolved() != top_unresolved_) {
last = last->next_unresolved();
if (outer_scope_->unresolved_list_.first() != top_unresolved_) {
// If the marked VariableProxy (snapshotted) is not the first, we need to
// find it and move all VariableProxies up to that point into new_parent,
// then restore the snapshotted state by reinitializing the outer scope's
// list.
{
auto iter = outer_scope_->unresolved_list_.begin();
while (*iter != top_unresolved_) {
++iter;
}
outer_scope_->unresolved_list_.Rewind(iter);
}
last->set_next_unresolved(nullptr);
new_parent->unresolved_ = outer_scope_->unresolved_;
outer_scope_->unresolved_ = top_unresolved_;
new_parent->unresolved_list_ = std::move(outer_scope_->unresolved_list_);
outer_scope_->unresolved_list_.ReinitializeHead(top_unresolved_);
}
// TODO(verwaest): This currently only moves do-expression declared variables
@ -1261,8 +1263,7 @@ void Scope::DeclareCatchVariableName(const AstRawString* name) {
void Scope::AddUnresolved(VariableProxy* proxy) {
DCHECK(!already_resolved_);
DCHECK(!proxy->is_resolved());
proxy->set_next_unresolved(unresolved_);
unresolved_ = proxy;
unresolved_list_.AddFront(proxy);
}
Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
@ -1274,22 +1275,7 @@ Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
}
bool Scope::RemoveUnresolved(VariableProxy* var) {
if (unresolved_ == var) {
unresolved_ = var->next_unresolved();
var->set_next_unresolved(nullptr);
return true;
}
VariableProxy* current = unresolved_;
while (current != nullptr) {
VariableProxy* next = current->next_unresolved();
if (var == next) {
current->set_next_unresolved(next->next_unresolved());
var->set_next_unresolved(nullptr);
return true;
}
current = next;
}
return false;
return unresolved_list_.Remove(var);
}
Variable* Scope::NewTemporary(const AstRawString* name) {
@ -1483,11 +1469,12 @@ Scope* Scope::GetOuterScopeWithContext() {
Handle<StringSet> DeclarationScope::CollectNonLocals(
Isolate* isolate, ParseInfo* info, Handle<StringSet> non_locals) {
VariableProxy* free_variables = FetchFreeVariables(this, info);
for (VariableProxy* proxy = free_variables; proxy != nullptr;
proxy = proxy->next_unresolved()) {
non_locals = StringSet::Add(isolate, non_locals, proxy->name());
}
ResolveScopesThenForEachVariable(this,
[=, &non_locals](VariableProxy* proxy) {
non_locals = StringSet::Add(
isolate, non_locals, proxy->name());
},
info);
return non_locals;
}
@ -1504,10 +1491,15 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
decls_.Clear();
locals_.Clear();
inner_scope_ = nullptr;
unresolved_ = nullptr;
unresolved_list_.Clear();
sloppy_block_function_map_ = nullptr;
rare_data_ = nullptr;
has_rest_ = false;
has_promise_ = false;
has_generator_object_ = false;
DCHECK_NE(zone_, ast_value_factory->zone());
zone_->ReleaseMemory();
if (aborted) {
// Prepare scope for use in the outer zone.
@ -1532,7 +1524,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
void Scope::SavePreParsedScopeData() {
DCHECK(FLAG_preparser_scope_analysis);
if (ProducedPreParsedScopeData::ScopeIsSkippableFunctionScope(this)) {
if (PreParsedScopeDataBuilder::ScopeIsSkippableFunctionScope(this)) {
AsDeclarationScope()->SavePreParsedScopeDataForDeclarationScope();
}
@ -1542,30 +1534,33 @@ void Scope::SavePreParsedScopeData() {
}
void DeclarationScope::SavePreParsedScopeDataForDeclarationScope() {
if (produced_preparsed_scope_data_ != nullptr) {
if (preparsed_scope_data_builder_ != nullptr) {
DCHECK(FLAG_preparser_scope_analysis);
produced_preparsed_scope_data_->SaveScopeAllocationData(this);
preparsed_scope_data_builder_->SaveScopeAllocationData(this);
}
}
void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
DCHECK(!force_eager_compilation_);
VariableProxy* unresolved = nullptr;
if (!outer_scope_->is_script_scope() ||
(FLAG_preparser_scope_analysis &&
produced_preparsed_scope_data_ != nullptr &&
produced_preparsed_scope_data_->ContainsInnerFunctions())) {
base::ThreadedList<VariableProxy> new_unresolved_list;
if (!IsArrowFunction(function_kind_) &&
(!outer_scope_->is_script_scope() ||
(FLAG_preparser_scope_analysis &&
preparsed_scope_data_builder_ != nullptr &&
preparsed_scope_data_builder_->ContainsInnerFunctions()))) {
// Try to resolve unresolved variables for this Scope and migrate those
// which cannot be resolved inside. It doesn't make sense to try to resolve
// them in the outer Scopes here, because they are incomplete.
for (VariableProxy* proxy = FetchFreeVariables(this); proxy != nullptr;
proxy = proxy->next_unresolved()) {
DCHECK(!proxy->is_resolved());
VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
copy->set_next_unresolved(unresolved);
unresolved = copy;
}
ResolveScopesThenForEachVariable(
this, [=, &new_unresolved_list](VariableProxy* proxy) {
// Don't copy unresolved references to the script scope, unless it's a
// reference to a private field. In that case keep it so we can fail
// later.
if (!outer_scope_->is_script_scope() || proxy->is_private_field()) {
VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
new_unresolved_list.AddFront(copy);
}
});
// Migrate function_ to the right Zone.
if (function_ != nullptr) {
@ -1586,7 +1581,7 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
ResetAfterPreparsing(ast_node_factory->ast_value_factory(), false);
unresolved_ = unresolved;
unresolved_list_ = std::move(new_unresolved_list);
}
#ifdef DEBUG
@ -1673,8 +1668,8 @@ void PrintMap(int indent, const char* label, VariableMap* map, bool locals,
for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
if (var == function_var) continue;
if (var == kDummyPreParserVariable ||
var == kDummyPreParserLexicalVariable) {
if (var == Scope::kDummyPreParserVariable ||
var == Scope::kDummyPreParserLexicalVariable) {
continue;
}
bool local = !IsDynamicVariableMode(var->mode());
@ -2045,8 +2040,7 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
// scopes.
if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
DCHECK_EQ(variables_.occupancy(), 0);
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
for (VariableProxy* proxy : unresolved_list_) {
Variable* var = outer_scope()->LookupRecursive(info, proxy, nullptr);
if (var == nullptr) {
DCHECK(proxy->is_private_field());
@ -2060,8 +2054,7 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
}
} else {
// Resolve unresolved variables for this scope.
for (VariableProxy* proxy = unresolved_; proxy != nullptr;
proxy = proxy->next_unresolved()) {
for (VariableProxy* proxy : unresolved_list_) {
if (!ResolveVariable(info, proxy)) return false;
}
@ -2074,57 +2067,6 @@ bool Scope::ResolveVariablesRecursively(ParseInfo* info) {
return true;
}
VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
ParseInfo* info,
VariableProxy* stack) {
// Module variables must be allocated before variable resolution
// to ensure that UpdateNeedsHoleCheck() can detect import variables.
if (info != nullptr && is_module_scope()) {
AsModuleScope()->AllocateModuleVariables();
}
// Lazy parsed declaration scopes are already partially analyzed. If there are
// unresolved references remaining, they just need to be resolved in outer
// scopes.
Scope* lookup =
is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
? outer_scope()
: this;
for (VariableProxy *proxy = unresolved_, *next = nullptr; proxy != nullptr;
proxy = next) {
next = proxy->next_unresolved();
DCHECK(!proxy->is_resolved());
Variable* var =
lookup->LookupRecursive(info, proxy, max_outer_scope->outer_scope());
if (var == nullptr) {
proxy->set_next_unresolved(stack);
stack = proxy;
} else if (var != kDummyPreParserVariable &&
var != kDummyPreParserLexicalVariable) {
if (info != nullptr) {
// In this case we need to leave scopes in a way that they can be
// allocated. If we resolved variables from lazy parsed scopes, we need
// to context allocate the var.
ResolveTo(info, proxy, var);
if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
} else {
var->set_is_used();
if (proxy->is_assigned()) {
var->set_maybe_assigned();
}
}
}
}
// Clear unresolved_ as it's in an inconsistent state.
unresolved_ = nullptr;
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
stack = scope->FetchFreeVariables(max_outer_scope, info, stack);
}
return stack;
}
bool Scope::MustAllocate(Variable* var) {
if (var == kDummyPreParserLexicalVariable || var == kDummyPreParserVariable) {
return true;
@ -2236,6 +2178,24 @@ void DeclarationScope::AllocateReceiver() {
AllocateParameter(receiver(), -1);
}
void DeclarationScope::AllocatePromise() {
if (!has_promise_) return;
DCHECK_NOT_NULL(promise_var());
DCHECK_EQ(this, promise_var()->scope());
AllocateStackSlot(promise_var());
DCHECK_EQ(VariableLocation::LOCAL, promise_var()->location());
DCHECK_EQ(kPromiseVarIndex, promise_var()->index());
}
void DeclarationScope::AllocateGeneratorObject() {
if (!has_generator_object_) return;
DCHECK_NOT_NULL(generator_object_var());
DCHECK_EQ(this, generator_object_var()->scope());
AllocateStackSlot(generator_object_var());
DCHECK_EQ(VariableLocation::LOCAL, generator_object_var()->location());
DCHECK_EQ(kGeneratorObjectVarIndex, generator_object_var()->index());
}
void Scope::AllocateNonParameterLocal(Variable* var) {
DCHECK(var->scope() == this);
if (var->IsUnallocated() && MustAllocate(var)) {
@ -2304,6 +2264,19 @@ void Scope::AllocateVariablesRecursively() {
return;
}
// Make sure to allocate the .promise (for async functions) or
// .generator_object (for async generators) first, so that it
// gets the required stack slot 0 in case it's needed. See
// http://bit.ly/v8-zero-cost-async-stack-traces for details.
if (is_function_scope()) {
FunctionKind kind = GetClosureScope()->function_kind();
if (IsAsyncGeneratorFunction(kind)) {
AsDeclarationScope()->AllocateGeneratorObject();
} else if (IsAsyncFunction(kind)) {
AsDeclarationScope()->AllocatePromise();
}
}
// Allocate variables for inner scopes.
for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
scope->AllocateVariablesRecursively();
@ -2410,5 +2383,9 @@ int Scope::ContextLocalCount() const {
(is_function_var_in_context ? 1 : 0);
}
void* const Scope::kDummyPreParserVariable = reinterpret_cast<void*>(0x1);
void* const Scope::kDummyPreParserLexicalVariable =
reinterpret_cast<void*>(0x2);
} // namespace internal
} // namespace v8
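The allocation-order change above, where AllocatePromise() or AllocateGeneratorObject() runs before all other locals, is what pins the variable to stack slot 0 for the zero-cost async stack-trace machinery. A tiny illustration of why allocating first guarantees index 0, with hypothetical names:

#include <cassert>
#include <string>
#include <vector>

struct Frame {
  std::vector<std::string> slots;
  int Allocate(const std::string& name) {
    slots.push_back(name);
    return static_cast<int>(slots.size()) - 1;  // indices grow monotonically
  }
};

int main() {
  Frame frame;
  int promise_index = frame.Allocate(".promise");  // allocated before locals
  frame.Allocate("x");
  frame.Allocate("y");
  assert(promise_index == 0);  // cf. DCHECK_EQ(kPromiseVarIndex, ...)
  return 0;
}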

View File

@ -20,8 +20,7 @@ class AstValueFactory;
class AstRawString;
class Declaration;
class ParseInfo;
class PreParsedScopeData;
class ProducedPreParsedScopeData;
class PreParsedScopeDataBuilder;
class SloppyBlockFunctionStatement;
class Statement;
class StringSet;
@ -103,7 +102,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void SetScopeName(const AstRawString* scope_name) {
scope_name_ = scope_name;
}
void set_needs_migration() { needs_migration_ = true; }
#endif
// TODO(verwaest): Is this needed on Scope?
@ -114,7 +112,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
ModuleScope* AsModuleScope();
const ModuleScope* AsModuleScope() const;
class Snapshot final BASE_EMBEDDED {
class Snapshot final {
public:
explicit Snapshot(Scope* scope);
~Snapshot();
@ -125,8 +123,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
Scope* outer_scope_;
Scope* top_inner_scope_;
VariableProxy* top_unresolved_;
ThreadedList<Variable>::Iterator top_local_;
ThreadedList<Declaration>::Iterator top_decl_;
base::ThreadedList<Variable>::Iterator top_local_;
base::ThreadedList<Declaration>::Iterator top_decl_;
const bool outer_scope_calls_eval_;
};
@ -203,9 +201,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
void DeclareCatchVariableName(const AstRawString* name);
// Declarations list.
ThreadedList<Declaration>* declarations() { return &decls_; }
base::ThreadedList<Declaration>* declarations() { return &decls_; }
ThreadedList<Variable>* locals() { return &locals_; }
base::ThreadedList<Variable>* locals() { return &locals_; }
// Create a new unresolved variable.
VariableProxy* NewUnresolved(AstNodeFactory* factory,
@ -218,8 +216,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
DCHECK(!already_resolved_);
DCHECK_EQ(factory->zone(), zone());
VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_pos);
proxy->set_next_unresolved(unresolved_);
unresolved_ = proxy;
AddUnresolved(proxy);
return proxy;
}
@ -480,6 +477,9 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return false;
}
static void* const kDummyPreParserVariable;
static void* const kDummyPreParserLexicalVariable;
protected:
explicit Scope(Zone* zone);
@ -522,12 +522,12 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
VariableMap variables_;
// In case of non-scopeinfo-backed scopes, this contains the variables of the
// map above in order of addition.
ThreadedList<Variable> locals_;
base::ThreadedList<Variable> locals_;
// Unresolved variables referred to from this scope. The proxies themselves
// form a linked list of all unresolved proxies.
VariableProxy* unresolved_;
base::ThreadedList<VariableProxy> unresolved_list_;
// Declarations.
ThreadedList<Declaration> decls_;
base::ThreadedList<Declaration> decls_;
// Serialized scope info support.
Handle<ScopeInfo> scope_info_;
@ -597,9 +597,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
// Finds free variables of this scope. This mutates the unresolved variables
// list along the way, so full resolution cannot be done afterwards.
// If a ParseInfo* is passed, non-free variables will be resolved.
VariableProxy* FetchFreeVariables(DeclarationScope* max_outer_scope,
ParseInfo* info = nullptr,
VariableProxy* stack = nullptr);
template <typename T>
void ResolveScopesThenForEachVariable(DeclarationScope* max_outer_scope,
T variable_proxy_stackvisitor,
ParseInfo* info = nullptr);
// Predicates.
bool MustAllocate(Variable* var);
@ -682,6 +683,12 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
}
bool is_being_lazily_parsed() const { return is_being_lazily_parsed_; }
#endif
void set_zone(Zone* zone) {
#ifdef DEBUG
needs_migration_ = true;
#endif
zone_ = zone;
}
bool ShouldEagerCompile() const;
void set_should_eager_compile();
@ -759,11 +766,22 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// literals, or nullptr. Only valid for function scopes.
Variable* function_var() const { return function_; }
// The variable holding the JSGeneratorObject for generator, async
// and async generator functions, and modules. Only valid for
// function and module scopes.
Variable* generator_object_var() const {
DCHECK(is_function_scope() || is_module_scope());
return GetRareVariable(RareVariable::kGeneratorObject);
}
// For async generators, the .generator_object variable is always
// allocated to a fixed stack slot, such that the stack trace
// construction logic can access it.
static constexpr int kGeneratorObjectVarIndex = 0;
// The variable holding the promise returned from async functions.
// Only valid for function scopes in async functions (i.e. not
// for async generators).
Variable* promise_var() const {
DCHECK(is_function_scope());
DCHECK(IsAsyncFunction(function_kind_));
@ -771,6 +789,11 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
return GetRareVariable(RareVariable::kPromise);
}
// For async functions, the .promise variable is always allocated
// to a fixed stack slot, such that the stack trace construction
// logic can access it.
static constexpr int kPromiseVarIndex = 0;
// Parameters. The left-most parameter has index 0.
// Only valid for function and module scopes.
Variable* parameter(int index) const {
@ -898,6 +921,8 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
void AllocateLocals();
void AllocateParameterLocals();
void AllocateReceiver();
void AllocatePromise();
void AllocateGeneratorObject();
void ResetAfterPreparsing(AstValueFactory* ast_value_factory, bool aborted);
@ -919,13 +944,13 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
// saved in produced_preparsed_scope_data_.
void SavePreParsedScopeDataForDeclarationScope();
void set_produced_preparsed_scope_data(
ProducedPreParsedScopeData* produced_preparsed_scope_data) {
produced_preparsed_scope_data_ = produced_preparsed_scope_data;
void set_preparsed_scope_data_builder(
PreParsedScopeDataBuilder* preparsed_scope_data_builder) {
preparsed_scope_data_builder_ = preparsed_scope_data_builder;
}
ProducedPreParsedScopeData* produced_preparsed_scope_data() const {
return produced_preparsed_scope_data_;
PreParsedScopeDataBuilder* preparsed_scope_data_builder() const {
return preparsed_scope_data_builder_;
}
private:
@ -954,6 +979,10 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
bool force_eager_compilation_ : 1;
// This function scope has a rest parameter.
bool has_rest_ : 1;
// This function scope has a .promise variable.
bool has_promise_ : 1;
// This function scope has a .generator_object variable.
bool has_generator_object_ : 1;
// This scope has a parameter called "arguments".
bool has_arguments_parameter_ : 1;
// This scope uses "super" property ('super.foo').
@ -981,7 +1010,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope {
Variable* arguments_;
// For producing the scope allocation data during preparsing.
ProducedPreParsedScopeData* produced_preparsed_scope_data_;
PreParsedScopeDataBuilder* preparsed_scope_data_builder_;
struct RareData : public ZoneObject {
// Convenience variable; Subclass constructor only

View File

@ -181,7 +181,7 @@ class Variable final : public ZoneObject {
: kNeedsInitialization;
}
typedef ThreadedList<Variable> List;
typedef base::ThreadedList<Variable> List;
private:
Scope* scope_;
@ -215,6 +215,7 @@ class Variable final : public ZoneObject {
ForceHoleInitializationField::kNext, 1> {};
Variable** next() { return &next_; }
friend List;
friend base::ThreadedListTraits<Variable>;
};
} // namespace internal
} // namespace v8

70
deps/v8/src/base/address-region.h vendored Normal file
View File

@ -0,0 +1,70 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_ADDRESS_REGION_H_
#define V8_BASE_ADDRESS_REGION_H_
#include <iostream>
#include "src/base/macros.h"
namespace v8 {
namespace base {
// Helper class representing an address region of certain size.
class AddressRegion {
public:
typedef uintptr_t Address;
AddressRegion() = default;
AddressRegion(Address address, size_t size)
: address_(address), size_(size) {}
Address begin() const { return address_; }
Address end() const { return address_ + size_; }
size_t size() const { return size_; }
void set_size(size_t size) { size_ = size; }
bool is_empty() const { return size_ == 0; }
bool contains(Address address) const {
STATIC_ASSERT(std::is_unsigned<Address>::value);
return (address - begin()) < size();
}
bool contains(Address address, size_t size) const {
STATIC_ASSERT(std::is_unsigned<Address>::value);
Address offset = address - begin();
return (offset < size_) && (offset + size <= size_);
}
bool contains(AddressRegion region) const {
return contains(region.address_, region.size_);
}
bool operator==(AddressRegion other) const {
return address_ == other.address_ && size_ == other.size_;
}
bool operator!=(AddressRegion other) const {
return address_ != other.address_ || size_ != other.size_;
}
private:
Address address_ = 0;
size_t size_ = 0;
};
ASSERT_TRIVIALLY_COPYABLE(AddressRegion);
inline std::ostream& operator<<(std::ostream& out, AddressRegion region) {
return out << "[" << reinterpret_cast<void*>(region.begin()) << "+"
<< region.size() << "]";
}
} // namespace base
} // namespace v8
#endif // V8_BASE_ADDRESS_REGION_H_
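AddressRegion::contains() leans on unsigned wraparound: if address is below begin(), the subtraction wraps to a huge value and the single comparison fails, so no second bounds check is needed. A standalone demonstration with plain uintptr_t values:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <type_traits>

bool Contains(uintptr_t begin, size_t size, uintptr_t address) {
  static_assert(std::is_unsigned<uintptr_t>::value,
                "the wraparound trick requires unsigned arithmetic");
  return (address - begin) < size;  // wraps (and fails) when address < begin
}

int main() {
  assert(Contains(0x1000, 0x100, 0x1000));   // first byte is inside
  assert(Contains(0x1000, 0x100, 0x10FF));   // last byte is inside
  assert(!Contains(0x1000, 0x100, 0x1100));  // one-past-the-end is outside
  assert(!Contains(0x1000, 0x100, 0x0FFF));  // below begin: wraps, fails
  return 0;
}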

View File

@ -377,6 +377,22 @@ class AtomicElement {
T value_;
};
template <typename T,
typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
inline void CheckedIncrement(std::atomic<T>* number, T amount) {
const T old = number->fetch_add(amount);
DCHECK_GE(old + amount, old);
USE(old);
}
template <typename T,
typename = typename std::enable_if<std::is_unsigned<T>::value>::type>
inline void CheckedDecrement(std::atomic<T>* number, T amount) {
const T old = number->fetch_sub(amount);
DCHECK_GE(old, amount);
USE(old);
}
} // namespace base
} // namespace v8
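Both helpers exploit the fact that fetch_add and fetch_sub return the value held before the operation, which makes overflow and underflow observable after the fact. A short usage sketch with std::atomic:

#include <atomic>
#include <cassert>
#include <cstddef>

int main() {
  std::atomic<size_t> live_bytes{10};

  // CheckedDecrement-style: the returned old value must cover the amount.
  const size_t old = live_bytes.fetch_sub(4);
  assert(old >= 4);                // would fire on underflow
  assert(live_bytes.load() == 6);

  // CheckedIncrement-style: old + amount must not wrap around.
  const size_t before = live_bytes.fetch_add(3);
  assert(before + 3 >= before);    // would fire on overflow
  return 0;
}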

View File

@ -146,6 +146,14 @@ constexpr inline bool IsPowerOfTwo(T value) {
V8_BASE_EXPORT uint32_t RoundUpToPowerOfTwo32(uint32_t value);
// Same for 64 bit integers. |value| must be <= 2^63
V8_BASE_EXPORT uint64_t RoundUpToPowerOfTwo64(uint64_t value);
// Same for size_t integers.
inline size_t RoundUpToPowerOfTwo(size_t value) {
if (sizeof(size_t) == sizeof(uint64_t)) {
return RoundUpToPowerOfTwo64(value);
} else {
return RoundUpToPowerOfTwo32(value);
}
}
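The size_t overload simply dispatches on word width; the underlying 32-bit helper (defined in src/base/bits.cc) is the classic bit-smearing round-up. An illustrative standalone version, not V8's exact code:

#include <cassert>
#include <cstdint>

uint32_t RoundUpToPowerOfTwo32Sketch(uint32_t value) {
  if (value <= 1) return 1;  // simplification; V8 DCHECKs its input range
  value--;                   // so exact powers of two map to themselves
  value |= value >> 1;
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;      // the high bit is now smeared all the way down
  return value + 1;
}

int main() {
  assert(RoundUpToPowerOfTwo32Sketch(3) == 4);
  assert(RoundUpToPowerOfTwo32Sketch(64) == 64);
  assert(RoundUpToPowerOfTwo32Sketch(65) == 128);
  return 0;
}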
// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
// less than or equal to |value|. If you pass in a |value| that is already a

View File

@ -0,0 +1,101 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/bounded-page-allocator.h"
namespace v8 {
namespace base {
BoundedPageAllocator::BoundedPageAllocator(v8::PageAllocator* page_allocator,
Address start, size_t size,
size_t allocate_page_size)
: allocate_page_size_(allocate_page_size),
commit_page_size_(page_allocator->CommitPageSize()),
page_allocator_(page_allocator),
region_allocator_(start, size, allocate_page_size_) {
CHECK_NOT_NULL(page_allocator);
CHECK(IsAligned(allocate_page_size, page_allocator->AllocatePageSize()));
CHECK(IsAligned(allocate_page_size_, commit_page_size_));
}
BoundedPageAllocator::Address BoundedPageAllocator::begin() const {
return region_allocator_.begin();
}
size_t BoundedPageAllocator::size() const { return region_allocator_.size(); }
void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
size_t alignment,
PageAllocator::Permission access) {
LockGuard<Mutex> guard(&mutex_);
CHECK(IsAligned(alignment, region_allocator_.page_size()));
// Region allocator does not support alignments bigger than its own
// allocation alignment.
CHECK_LE(alignment, allocate_page_size_);
// TODO(ishell): Consider using randomized version here.
Address address = region_allocator_.AllocateRegion(size);
if (address == RegionAllocator::kAllocationFailure) {
return nullptr;
}
CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
access));
return reinterpret_cast<void*>(address);
}
bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
LockGuard<Mutex> guard(&mutex_);
Address address = reinterpret_cast<Address>(raw_address);
size_t freed_size = region_allocator_.FreeRegion(address);
if (freed_size != size) return false;
CHECK(page_allocator_->SetPermissions(raw_address, size,
PageAllocator::kNoAccess));
return true;
}
bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
size_t new_size) {
Address address = reinterpret_cast<Address>(raw_address);
CHECK(IsAligned(address, allocate_page_size_));
DCHECK_LT(new_size, size);
DCHECK(IsAligned(size - new_size, commit_page_size_));
// Check if we freed any allocatable pages by this release.
size_t allocated_size = RoundUp(size, allocate_page_size_);
size_t new_allocated_size = RoundUp(new_size, allocate_page_size_);
#ifdef DEBUG
{
// There must be an allocated region at the given |address| of a size not
// smaller than |size|.
LockGuard<Mutex> guard(&mutex_);
CHECK_EQ(allocated_size, region_allocator_.CheckRegion(address));
}
#endif
if (new_allocated_size < allocated_size) {
LockGuard<Mutex> guard(&mutex_);
region_allocator_.TrimRegion(address, new_allocated_size);
}
// Keep the region in "used" state, just uncommit some pages.
Address free_address = address + new_size;
size_t free_size = size - new_size;
return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
free_size, PageAllocator::kNoAccess);
}
bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
PageAllocator::Permission access) {
DCHECK(IsAligned(reinterpret_cast<Address>(address), commit_page_size_));
DCHECK(IsAligned(size, commit_page_size_));
DCHECK(region_allocator_.contains(reinterpret_cast<Address>(address), size));
return page_allocator_->SetPermissions(address, size, access);
}
} // namespace base
} // namespace v8
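ReleasePages() keeps the region in the "used" state but works at two granularities: whole allocatable pages freed by the shrink are returned to the region allocator, while the remaining tail is merely uncommitted. The size bookkeeping, worked through with hypothetical numbers:

#include <cassert>
#include <cstddef>

constexpr size_t RoundUp(size_t x, size_t multiple) {
  return (x + multiple - 1) / multiple * multiple;
}

int main() {
  const size_t allocate_page_size = 64 * 1024;  // hypothetical 64 KiB
  const size_t size = 200 * 1024;               // currently held by caller
  const size_t new_size = 70 * 1024;            // caller keeps this much

  const size_t allocated = RoundUp(size, allocate_page_size);         // 256 KiB
  const size_t new_allocated = RoundUp(new_size, allocate_page_size); // 128 KiB
  assert(new_allocated < allocated);  // two whole pages return to the pool

  // The tail [new_size, size) is uncommitted but stays reserved.
  assert(size - new_size == 130 * 1024);
  return 0;
}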

View File

@ -0,0 +1,79 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
#define V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
#include "include/v8-platform.h"
#include "src/base/platform/mutex.h"
#include "src/base/region-allocator.h"
namespace v8 {
namespace base {
// This is a v8::PageAllocator implementation that allocates pages within the
// pre-reserved region of virtual space. This class requires the virtual space
// to be kept reserved during the lifetime of this object.
// The main applications of the bounded page allocator are
// - V8 heap pointer compression, which requires the whole V8 heap to be
// allocated within a contiguous range of virtual address space,
// - executable page allocation, which allows the use of PC-relative 32-bit
// code displacements on certain 64-bit platforms.
// The bounded page allocator delegates the actual page allocations to
// another page allocator instance.
// The implementation is thread-safe.
class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
public:
typedef uintptr_t Address;
BoundedPageAllocator(v8::PageAllocator* page_allocator, Address start,
size_t size, size_t allocate_page_size);
~BoundedPageAllocator() override = default;
// These functions are not inlined to avoid https://crbug.com/v8/8275.
Address begin() const;
size_t size() const;
// Returns true if given address is in the range controlled by the bounded
// page allocator instance.
bool contains(Address address) const {
return region_allocator_.contains(address);
}
size_t AllocatePageSize() override { return allocate_page_size_; }
size_t CommitPageSize() override { return commit_page_size_; }
void SetRandomMmapSeed(int64_t seed) override {
page_allocator_->SetRandomMmapSeed(seed);
}
void* GetRandomMmapAddr() override {
return page_allocator_->GetRandomMmapAddr();
}
void* AllocatePages(void* address, size_t size, size_t alignment,
PageAllocator::Permission access) override;
bool FreePages(void* address, size_t size) override;
bool ReleasePages(void* address, size_t size, size_t new_size) override;
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override;
private:
v8::base::Mutex mutex_;
const size_t allocate_page_size_;
const size_t commit_page_size_;
v8::PageAllocator* const page_allocator_;
v8::base::RegionAllocator region_allocator_;
DISALLOW_COPY_AND_ASSIGN(BoundedPageAllocator);
};
} // namespace base
} // namespace v8
#endif // V8_BASE_BOUNDED_PAGE_ALLOCATOR_H_
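For orientation, here is a minimal sketch (not part of the diff) of how such a bounded allocator can be layered over a pre-reserved range. The `platform_allocator` parameter stands for any concrete `v8::PageAllocator` implementation, and the sizes are illustrative; only APIs visible in this diff and in `include/v8-platform.h` are used.

```cpp
// Sketch: reserve one contiguous range up front; the bounded allocator
// then hands out pages strictly within it.
#include "src/base/bounded-page-allocator.h"

void Example(v8::PageAllocator* platform_allocator) {
  const size_t page_size = platform_allocator->AllocatePageSize();
  const size_t reservation_size = 64 * page_size;
  void* start = platform_allocator->AllocatePages(
      nullptr, reservation_size, page_size, v8::PageAllocator::kNoAccess);
  v8::base::BoundedPageAllocator bounded(
      platform_allocator, reinterpret_cast<uintptr_t>(start),
      reservation_size, page_size);
  void* p = bounded.AllocatePages(nullptr, 4 * page_size, page_size,
                                  v8::PageAllocator::kReadWrite);
  bounded.FreePages(p, 4 * page_size);
  platform_allocator->FreePages(start, reservation_size);
}
```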

View File

@ -196,9 +196,9 @@
#endif
#if defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_X64)
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 1
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK true
#else
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK 0
#define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK false
#endif
// Number of bits to represent the page size for paged spaces. The value of 19

View File

@ -21,7 +21,7 @@ StackTrace::StackTrace(const void* const* trace, size_t count) {
count_ = count;
}
StackTrace::~StackTrace() {}
StackTrace::~StackTrace() = default;
const void* const* StackTrace::Addresses(size_t* count) const {
*count = count_;

View File

@ -61,7 +61,7 @@ char* itoa_r(intptr_t i, char* buf, size_t sz, int base, size_t padding);
namespace {
volatile sig_atomic_t in_signal_handler = 0;
bool dump_stack_in_signal_handler = 1;
bool dump_stack_in_signal_handler = true;
// The prefix used for mangled symbols, per the Itanium C++ ABI:
// http://www.codesourcery.com/cxx-abi/abi.html#mangling
@ -104,7 +104,7 @@ void DemangleSymbols(std::string* text) {
// Try to demangle the mangled symbol candidate.
int status = 0;
std::unique_ptr<char, FreeDeleter> demangled_symbol(
abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, 0, &status));
abi::__cxa_demangle(mangled_symbol.c_str(), nullptr, nullptr, &status));
if (status == 0) { // Demangling is successful.
// Remove the mangled symbol.
text->erase(mangled_start, mangled_end - mangled_start);
@ -125,7 +125,7 @@ class BacktraceOutputHandler {
virtual void HandleOutput(const char* output) = 0;
protected:
virtual ~BacktraceOutputHandler() {}
virtual ~BacktraceOutputHandler() = default;
};
#if HAVE_EXECINFO_H
@ -266,7 +266,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
class PrintBacktraceOutputHandler : public BacktraceOutputHandler {
public:
PrintBacktraceOutputHandler() {}
PrintBacktraceOutputHandler() = default;
void HandleOutput(const char* output) override {
// NOTE: This code MUST be async-signal safe (it's used by in-process
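The hunk above replaces a literal `0` with `nullptr` for `__cxa_demangle`'s length argument. A standalone sketch of that call, outside the diff (the mangled name is just an example): passing `nullptr` for both the output buffer and its length asks the runtime to `malloc()` the result, which is why the diff wraps it in a `FreeDeleter`-owned `unique_ptr`.

```cpp
// Sketch: demangle an Itanium-ABI symbol; caller frees the result.
#include <cxxabi.h>
#include <cstdio>
#include <cstdlib>

int main() {
  int status = 0;
  char* demangled =
      abi::__cxa_demangle("_ZN2v84baseE", nullptr, nullptr, &status);
  if (status == 0) std::printf("%s\n", demangled);  // prints "v8::base"
  std::free(demangled);
  return 0;
}
```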

View File

@ -7,13 +7,6 @@
#include "src/base/debug/stack_trace.h"
// This file can't use "src/base/win32-headers.h" because it defines symbols
// that lead to compilation errors. But `NOMINMAX` should be defined to
// prevent the `min` and `max` macros from being defined.
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <dbghelp.h>
#include <Shlwapi.h>

View File

@ -90,7 +90,7 @@ typedef union {
ew_u.value = (d); \
(ix0) = ew_u.parts.msw; \
(ix1) = ew_u.parts.lsw; \
} while (0)
} while (false)
/* Get a 64-bit int from a double. */
#define EXTRACT_WORD64(ix, d) \
@ -98,7 +98,7 @@ typedef union {
ieee_double_shape_type ew_u; \
ew_u.value = (d); \
(ix) = ew_u.xparts.w; \
} while (0)
} while (false)
/* Get the more significant 32 bit int from a double. */
@ -107,7 +107,7 @@ typedef union {
ieee_double_shape_type gh_u; \
gh_u.value = (d); \
(i) = gh_u.parts.msw; \
} while (0)
} while (false)
/* Get the less significant 32 bit int from a double. */
@ -116,7 +116,7 @@ typedef union {
ieee_double_shape_type gl_u; \
gl_u.value = (d); \
(i) = gl_u.parts.lsw; \
} while (0)
} while (false)
/* Set a double from two 32 bit ints. */
@ -126,7 +126,7 @@ typedef union {
iw_u.parts.msw = (ix0); \
iw_u.parts.lsw = (ix1); \
(d) = iw_u.value; \
} while (0)
} while (false)
/* Set a double from a 64-bit int. */
#define INSERT_WORD64(d, ix) \
@ -134,7 +134,7 @@ typedef union {
ieee_double_shape_type iw_u; \
iw_u.xparts.w = (ix); \
(d) = iw_u.value; \
} while (0)
} while (false)
/* Set the more significant 32 bits of a double from an int. */
@ -144,7 +144,7 @@ typedef union {
sh_u.value = (d); \
sh_u.parts.msw = (v); \
(d) = sh_u.value; \
} while (0)
} while (false)
/* Set the less significant 32 bits of a double from an int. */
@ -154,7 +154,7 @@ typedef union {
sl_u.value = (d); \
sl_u.parts.lsw = (v); \
(d) = sl_u.value; \
} while (0)
} while (false)
/* Support macro. */
@ -1210,9 +1210,9 @@ double atan(double x) {
if (ix > 0x7FF00000 || (ix == 0x7FF00000 && (low != 0)))
return x + x; /* NaN */
if (hx > 0)
return atanhi[3] + *(volatile double *)&atanlo[3];
return atanhi[3] + *const_cast<volatile double*>(&atanlo[3]);
else
return -atanhi[3] - *(volatile double *)&atanlo[3];
return -atanhi[3] - *const_cast<volatile double*>(&atanlo[3]);
}
if (ix < 0x3FDC0000) { /* |x| < 0.4375 */
if (ix < 0x3E400000) { /* |x| < 2^-27 */

View File

@ -49,7 +49,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
if (V8_UNLIKELY(!(condition))) { \
FATAL("Check failed: %s.", message); \
} \
} while (0)
} while (false)
#define CHECK(condition) CHECK_WITH_MSG(condition, #condition)
#ifdef DEBUG
@ -59,7 +59,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
if (V8_UNLIKELY(!(condition))) { \
V8_Dcheck(__FILE__, __LINE__, message); \
} \
} while (0)
} while (false)
#define DCHECK(condition) DCHECK_WITH_MSG(condition, #condition)
// Helper macro for binary operators.
@ -73,7 +73,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
FATAL("Check failed: %s.", _msg->c_str()); \
delete _msg; \
} \
} while (0)
} while (false)
#define DCHECK_OP(name, op, lhs, rhs) \
do { \
@ -84,7 +84,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
V8_Dcheck(__FILE__, __LINE__, _msg->c_str()); \
delete _msg; \
} \
} while (0)
} while (false)
#else
@ -98,7 +98,7 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int,
typename ::v8::base::pass_value_or_ref<decltype(rhs)>::type>((lhs), \
(rhs)); \
CHECK_WITH_MSG(_cmp, #lhs " " #op " " #rhs); \
} while (0)
} while (false)
#define DCHECK_WITH_MSG(condition, msg) void(0);
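The recurring `} while (0)` to `} while (false)` edits in this file are a bool-literal cleanup with identical behavior. As a reminder of why these macros end in `do { ... } while (false)` at all, here is a standalone sketch; `MY_CHECK` is hypothetical, not part of the diff.

```cpp
// Sketch: the do/while wrapper turns a multi-statement macro body into a
// single statement, so it composes safely with unbraced if/else.
#include <cstdio>
#include <cstdlib>

#define MY_CHECK(condition)                                   \
  do {                                                        \
    if (!(condition)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #condition); \
      std::abort();                                           \
    }                                                         \
  } while (false)

void Example(int x) {
  if (x > 0)
    MY_CHECK(x != 42);  // expands to one statement; no dangling-else issue
  else
    std::printf("non-positive\n");
}
```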

59
deps/v8/src/base/lsan-page-allocator.cc vendored Normal file
View File

@ -0,0 +1,59 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/lsan-page-allocator.h"
#include "src/base/logging.h"
#if defined(LEAK_SANITIZER)
#include <sanitizer/lsan_interface.h>
#endif
namespace v8 {
namespace base {
LsanPageAllocator::LsanPageAllocator(v8::PageAllocator* page_allocator)
: page_allocator_(page_allocator),
allocate_page_size_(page_allocator_->AllocatePageSize()),
commit_page_size_(page_allocator_->CommitPageSize()) {
DCHECK_NOT_NULL(page_allocator);
}
void* LsanPageAllocator::AllocatePages(void* address, size_t size,
size_t alignment,
PageAllocator::Permission access) {
void* result =
page_allocator_->AllocatePages(address, size, alignment, access);
#if defined(LEAK_SANITIZER)
if (result != nullptr) {
__lsan_register_root_region(result, size);
}
#endif
return result;
}
bool LsanPageAllocator::FreePages(void* address, size_t size) {
bool result = page_allocator_->FreePages(address, size);
#if defined(LEAK_SANITIZER)
if (result) {
__lsan_unregister_root_region(address, size);
}
#endif
return result;
}
bool LsanPageAllocator::ReleasePages(void* address, size_t size,
size_t new_size) {
bool result = page_allocator_->ReleasePages(address, size, new_size);
#if defined(LEAK_SANITIZER)
if (result) {
__lsan_unregister_root_region(address, size);
__lsan_register_root_region(address, new_size);
}
#endif
return result;
}
} // namespace base
} // namespace v8

56
deps/v8/src/base/lsan-page-allocator.h vendored Normal file
View File

@ -0,0 +1,56 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_LSAN_PAGE_ALLOCATOR_H_
#define V8_BASE_LSAN_PAGE_ALLOCATOR_H_
#include "include/v8-platform.h"
#include "src/base/base-export.h"
#include "src/base/compiler-specific.h"
namespace v8 {
namespace base {
// This is a v8::PageAllocator implementation that decorates provided page
// allocator object with leak sanitizer notifications when LEAK_SANITIZER
// is defined.
class V8_BASE_EXPORT LsanPageAllocator
: public NON_EXPORTED_BASE(::v8::PageAllocator) {
public:
LsanPageAllocator(v8::PageAllocator* page_allocator);
~LsanPageAllocator() override = default;
size_t AllocatePageSize() override { return allocate_page_size_; }
size_t CommitPageSize() override { return commit_page_size_; }
void SetRandomMmapSeed(int64_t seed) override {
return page_allocator_->SetRandomMmapSeed(seed);
}
void* GetRandomMmapAddr() override {
return page_allocator_->GetRandomMmapAddr();
}
void* AllocatePages(void* address, size_t size, size_t alignment,
PageAllocator::Permission access) override;
bool FreePages(void* address, size_t size) override;
bool ReleasePages(void* address, size_t size, size_t new_size) override;
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override {
return page_allocator_->SetPermissions(address, size, access);
}
private:
v8::PageAllocator* const page_allocator_;
const size_t allocate_page_size_;
const size_t commit_page_size_;
};
} // namespace base
} // namespace v8
#endif // V8_BASE_LSAN_PAGE_ALLOCATOR_H_
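A brief usage sketch (not part of the diff): the LSan allocator is a pure decorator, so outside `LEAK_SANITIZER` builds the notifications compile away and every call forwards to the wrapped allocator. The default-constructed `v8::base::PageAllocator` used here is the one declared elsewhere in this diff.

```cpp
// Sketch: wrap a system page allocator with LSan root-region tracking.
#include "src/base/lsan-page-allocator.h"
#include "src/base/page-allocator.h"

void Example() {
  v8::base::PageAllocator system_allocator;
  v8::base::LsanPageAllocator lsan(&system_allocator);
  const size_t size = lsan.AllocatePageSize();
  void* p = lsan.AllocatePages(nullptr, size, size,
                               v8::PageAllocator::kReadWrite);
  lsan.FreePages(p, size);  // unregisters the root region under LSan
}
```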

View File

@ -14,6 +14,9 @@
// No-op macro which is used to work around MSVC's funky VA_ARGS support.
#define EXPAND(x) x
// This macro does nothing. That's all.
#define NOTHING(...)
// TODO(all) Replace all uses of this macro with C++'s offsetof. To do that, we
// have to make sure that only standard-layout types and simple field
// designators are used.
@ -195,8 +198,9 @@ V8_INLINE Dest bit_cast(Source const& source) {
#define V8_IMMEDIATE_CRASH() ((void(*)())0)()
#endif
// TODO(all) Replace all uses of this macro with static_assert, remove macro.
// A convenience wrapper around static_assert without a string message argument.
// Once C++17 becomes the default, this macro can be removed in favor of the
// new static_assert(condition) overload.
#define STATIC_ASSERT(test) static_assert(test, #test)
namespace v8 {
@ -276,6 +280,12 @@ struct Use {
(void)unused_tmp_array_for_use_macro; \
} while (false)
// Evaluate the instantiations of an expression with parameter packs.
// Since USE has left-to-right evaluation order of its arguments,
// the parameter pack is iterated from left to right and side effects
// have defined behavior.
#define ITERATE_PACK(...) USE(0, ((__VA_ARGS__), 0)...)
} // namespace base
} // namespace v8
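A small illustration of what `ITERATE_PACK` enables; `PrintAll` is a hypothetical helper, not part of this diff. Each pack element is evaluated exactly once, left to right, because `USE` initializes a braced array from its arguments.

```cpp
// Sketch: expand an expression once per parameter-pack element, in order.
#include <iostream>

#include "src/base/macros.h"

template <typename... Args>
void PrintAll(Args&&... args) {
  ITERATE_PACK(std::cout << args << '\n');
}

// PrintAll(1, "two", 3.5) prints 1, two and 3.5 on separate lines, in order.
```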
@ -346,47 +356,37 @@ V8_INLINE A implicit_cast(A x) {
// write V8_2PART_UINT64_C(0x12345678,90123456);
#define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
// Compute the 0-relative offset of some absolute value x of type T.
// This allows conversion of Addresses and integral types into
// 0-relative int offsets.
template <typename T>
constexpr inline intptr_t OffsetFrom(T x) {
return x - static_cast<T>(0);
}
// Compute the absolute value of type T for some 0-relative offset x.
// This allows conversion of 0-relative int offsets into Addresses and
// integral types.
template <typename T>
constexpr inline T AddressFrom(intptr_t x) {
return static_cast<T>(static_cast<T>(0) + x);
}
// Return the largest multiple of m which is <= x.
template <typename T>
inline T RoundDown(T x, intptr_t m) {
STATIC_ASSERT(std::is_integral<T>::value);
// m must be a power of two.
DCHECK(m != 0 && ((m & (m - 1)) == 0));
return AddressFrom<T>(OffsetFrom(x) & -m);
return x & -m;
}
template <intptr_t m, typename T>
constexpr inline T RoundDown(T x) {
STATIC_ASSERT(std::is_integral<T>::value);
// m must be a power of two.
STATIC_ASSERT(m != 0 && ((m & (m - 1)) == 0));
return AddressFrom<T>(OffsetFrom(x) & -m);
return x & -m;
}
// Return the smallest multiple of m which is >= x.
template <typename T>
inline T RoundUp(T x, intptr_t m) {
STATIC_ASSERT(std::is_integral<T>::value);
return RoundDown<T>(static_cast<T>(x + m - 1), m);
}
template <intptr_t m, typename T>
constexpr inline T RoundUp(T x) {
return RoundDown<m, T>(static_cast<T>(x + m - 1));
STATIC_ASSERT(std::is_integral<T>::value);
return RoundDown<m, T>(static_cast<T>(x + (m - 1)));
}
template <typename T, typename U>
inline bool IsAligned(T value, U alignment) {
return (value & (alignment - 1)) == 0;
}
inline void* AlignedAddress(void* address, size_t alignment) {
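The simplified `RoundDown` relies on two's-complement arithmetic: for a power-of-two `m`, `-m` equals `~(m - 1)`, a mask with the low `log2(m)` bits cleared, so the `OffsetFrom`/`AddressFrom` detour was unnecessary once the operand is required to be integral. A standalone sketch with illustrative values:

```cpp
// Sketch: x & -m rounds x down to a multiple of power-of-two m;
// RoundUp biases x by m - 1 first.
#include <cassert>
#include <cstdint>

int main() {
  constexpr int64_t m = 256;
  constexpr int64_t x = 1234;
  static_assert((x & -m) == 1024, "RoundDown(1234, 256)");
  static_assert(((x + (m - 1)) & -m) == 1280, "RoundUp(1234, 256)");
  assert(-m == ~(m - 1));  // the two mask spellings agree
  return 0;
}
```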

View File

@ -123,7 +123,7 @@ class Optional {
public:
using value_type = T;
constexpr Optional() {}
constexpr Optional() = default;
constexpr Optional(base::nullopt_t) {} // NOLINT(runtime/explicit)

View File

@ -24,11 +24,9 @@ STATIC_ASSERT_ENUM(PageAllocator::kReadExecute,
#undef STATIC_ASSERT_ENUM
size_t PageAllocator::AllocatePageSize() {
return base::OS::AllocatePageSize();
}
size_t PageAllocator::CommitPageSize() { return base::OS::CommitPageSize(); }
PageAllocator::PageAllocator()
: allocate_page_size_(base::OS::AllocatePageSize()),
commit_page_size_(base::OS::CommitPageSize()) {}
void PageAllocator::SetRandomMmapSeed(int64_t seed) {
base::OS::SetRandomMmapSeed(seed);

View File

@ -15,11 +15,12 @@ namespace base {
class V8_BASE_EXPORT PageAllocator
: public NON_EXPORTED_BASE(::v8::PageAllocator) {
public:
virtual ~PageAllocator() = default;
PageAllocator();
~PageAllocator() override = default;
size_t AllocatePageSize() override;
size_t AllocatePageSize() override { return allocate_page_size_; }
size_t CommitPageSize() override;
size_t CommitPageSize() override { return commit_page_size_; }
void SetRandomMmapSeed(int64_t seed) override;
@ -34,6 +35,10 @@ class V8_BASE_EXPORT PageAllocator
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override;
private:
const size_t allocate_page_size_;
const size_t commit_page_size_;
};
} // namespace base
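The change above caches the OS page sizes in the constructor so later `AllocatePageSize()`/`CommitPageSize()` calls return a stored member instead of querying the OS each time. A standalone sketch of the same pattern, assuming a POSIX `sysconf` backend (the class name is hypothetical):

```cpp
// Sketch: query the page size once, serve later calls from the cache.
#include <unistd.h>

#include <cstddef>

class CachedPageSize {
 public:
  CachedPageSize()
      : page_size_(static_cast<size_t>(sysconf(_SC_PAGESIZE))) {}
  size_t page_size() const { return page_size_; }  // no syscall here

 private:
  const size_t page_size_;
};
```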

View File

@ -3,4 +3,6 @@ set noparent
hpayer@chromium.org
mlippautz@chromium.org
per-file platform-fuchsia.cc=wez@chromium.org
# COMPONENT: Blink>JavaScript

View File

@ -57,8 +57,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
strlen(kVirtualMemoryName));
uintptr_t reservation;
uint32_t prot = GetProtectionFromMemoryPermission(access);
zx_status_t status = zx_vmar_map_old(zx_vmar_root_self(), 0, vmo, 0,
request_size, prot, &reservation);
zx_status_t status = zx_vmar_map(zx_vmar_root_self(), prot, 0, vmo, 0,
request_size, &reservation);
// Either the vmo is now referenced by the vmar, or we failed and are bailing,
// so close the vmo either way.
zx_handle_close(vmo);
@ -67,7 +67,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
}
uint8_t* base = reinterpret_cast<uint8_t*>(reservation);
uint8_t* aligned_base = RoundUp(base, alignment);
uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
// Unmap extra memory reserved before and after the desired block.
if (aligned_base != base) {
@ -114,9 +115,8 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
uint32_t prot = GetProtectionFromMemoryPermission(access);
return zx_vmar_protect_old(zx_vmar_root_self(),
reinterpret_cast<uintptr_t>(address), size,
prot) == ZX_OK;
return zx_vmar_protect(zx_vmar_root_self(), prot,
reinterpret_cast<uintptr_t>(address), size) == ZX_OK;
}
// static

View File

@ -27,14 +27,6 @@
#include <sys/types.h> // mmap & munmap
#include <unistd.h> // sysconf
// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
// Old versions of the C library <signal.h> didn't define the type.
#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
(defined(__arm__) || defined(__aarch64__)) && \
!defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
#include <asm/sigcontext.h> // NOLINT
#endif
#include <cmath>
#undef MAP_TYPE

View File

@ -15,7 +15,7 @@ class PosixDefaultTimezoneCache : public PosixTimezoneCache {
const char* LocalTimezone(double time_ms) override;
double LocalTimeOffset(double time_ms, bool is_utc) override;
~PosixDefaultTimezoneCache() override {}
~PosixDefaultTimezoneCache() override = default;
};
} // namespace base

View File

@ -86,7 +86,7 @@ namespace base {
namespace {
// 0 is never a valid thread id.
const pthread_t kNoThread = (pthread_t) 0;
const pthread_t kNoThread = static_cast<pthread_t>(0);
bool g_hard_abort = false;
@ -254,10 +254,6 @@ void* OS::GetRandomMmapAddr() {
// Little-endian Linux: 46 bits of virtual addressing.
raw_addr &= uint64_t{0x3FFFFFFF0000};
#endif
#elif V8_TARGET_ARCH_MIPS64
// We allocate code in 256 MB aligned segments because of optimizations using
// J instruction that require that all code is within a single 256 MB segment
raw_addr &= uint64_t{0x3FFFE0000000};
#elif V8_TARGET_ARCH_S390X
// Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
// of virtual addressing. Truncate to 40 bits to allow kernel chance to
@ -267,6 +263,10 @@ void* OS::GetRandomMmapAddr() {
// 31 bits of virtual addressing. Truncate to 29 bits to allow kernel chance
// to fulfill request.
raw_addr &= 0x1FFFF000;
#elif V8_TARGET_ARCH_MIPS64
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
// to fulfill request.
raw_addr &= uint64_t{0xFFFFFF0000};
#else
raw_addr &= 0x3FFFF000;
@ -313,7 +313,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// Unmap memory allocated before the aligned base address.
uint8_t* base = static_cast<uint8_t*>(result);
uint8_t* aligned_base = RoundUp(base, alignment);
uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
if (aligned_base != base) {
DCHECK_LT(base, aligned_base);
size_t prefix_size = static_cast<size_t>(aligned_base - base);

View File

@ -15,7 +15,7 @@ class PosixTimezoneCache : public TimezoneCache {
public:
double DaylightSavingsOffset(double time_ms) override;
void Clear() override {}
~PosixTimezoneCache() override {}
~PosixTimezoneCache() override = default;
protected:
static const int msPerSecond = 1000;

View File

@ -822,7 +822,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
if (base == nullptr) return nullptr; // Can't allocate, we're OOM.
// If address is suitably aligned, we're done.
uint8_t* aligned_base = RoundUp(base, alignment);
uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
if (base == aligned_base) return reinterpret_cast<void*>(base);
// Otherwise, free it and try a larger allocation.
@ -843,7 +844,8 @@ void* OS::Allocate(void* address, size_t size, size_t alignment,
// Try to trim the allocation by freeing the padded allocation and then
// calling VirtualAlloc at the aligned base.
CHECK(Free(base, padded_size));
aligned_base = RoundUp(base, alignment);
aligned_base = reinterpret_cast<uint8_t*>(
RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
base = reinterpret_cast<uint8_t*>(
VirtualAlloc(aligned_base, size, flags, protect));
// We might not get the reduced allocation due to a race. In that case,

View File

@ -188,7 +188,7 @@ class V8_BASE_EXPORT OS {
class V8_BASE_EXPORT MemoryMappedFile {
public:
virtual ~MemoryMappedFile() {}
virtual ~MemoryMappedFile() = default;
virtual void* memory() const = 0;
virtual size_t size() const = 0;

View File

@ -91,7 +91,9 @@ void Semaphore::Signal() {
// This check may fail with <libc-2.21, which we use on the try bots, if the
// semaphore is destroyed while sem_post is still executing. A workaround is
// to extend the lifetime of the semaphore.
CHECK_EQ(0, result);
if (result != 0) {
FATAL("Error when signaling semaphore, errno: %d", errno);
}
}

View File

@ -105,10 +105,7 @@ class V8_BASE_EXPORT TimeDelta final {
static TimeDelta FromTimespec(struct timespec ts);
struct timespec ToTimespec() const;
TimeDelta& operator=(const TimeDelta& other) {
delta_ = other.delta_;
return *this;
}
TimeDelta& operator=(const TimeDelta& other) = default;
// Computations with other deltas.
TimeDelta operator+(const TimeDelta& other) const {

291
deps/v8/src/base/region-allocator.cc vendored Normal file
View File

@ -0,0 +1,291 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/region-allocator.h"
#include "src/base/bits.h"
#include "src/base/macros.h"
namespace v8 {
namespace base {
// If |free_size| < |region_size| * |kMaxLoadFactorForRandomization|, stop
// trying
// to randomize region allocation.
constexpr double kMaxLoadFactorForRandomization = 0.40;
// Max number of attempts to allocate a page at a random address.
constexpr int kMaxRandomizationAttempts = 3;
RegionAllocator::RegionAllocator(Address memory_region_begin,
size_t memory_region_size, size_t page_size)
: whole_region_(memory_region_begin, memory_region_size, false),
region_size_in_pages_(size() / page_size),
max_load_for_randomization_(
static_cast<size_t>(size() * kMaxLoadFactorForRandomization)),
free_size_(0),
page_size_(page_size) {
CHECK_LT(begin(), end());
CHECK(base::bits::IsPowerOfTwo(page_size_));
CHECK(IsAligned(size(), page_size_));
CHECK(IsAligned(begin(), page_size_));
// Initial region.
Region* region = new Region(whole_region_);
all_regions_.insert(region);
FreeListAddRegion(region);
}
RegionAllocator::~RegionAllocator() {
for (Region* region : all_regions_) {
delete region;
}
}
RegionAllocator::AllRegionsSet::iterator RegionAllocator::FindRegion(
Address address) {
if (!whole_region_.contains(address)) return all_regions_.end();
Region key(address, 0, false);
AllRegionsSet::iterator iter = all_regions_.upper_bound(&key);
// Regions in |all_regions_| are compared by end() values and key's end()
// points exactly to the address we are querying, so the upper_bound will
// find the region whose |end()| is greater than the requested address.
DCHECK_NE(iter, all_regions_.end());
DCHECK((*iter)->contains(address));
return iter;
}
void RegionAllocator::FreeListAddRegion(Region* region) {
free_size_ += region->size();
free_regions_.insert(region);
}
RegionAllocator::Region* RegionAllocator::FreeListFindRegion(size_t size) {
Region key(0, size, false);
auto iter = free_regions_.lower_bound(&key);
return iter == free_regions_.end() ? nullptr : *iter;
}
void RegionAllocator::FreeListRemoveRegion(Region* region) {
DCHECK(!region->is_used());
auto iter = free_regions_.find(region);
DCHECK_NE(iter, free_regions_.end());
DCHECK_EQ(region, *iter);
DCHECK_LE(region->size(), free_size_);
free_size_ -= region->size();
free_regions_.erase(iter);
}
RegionAllocator::Region* RegionAllocator::Split(Region* region,
size_t new_size) {
DCHECK(IsAligned(new_size, page_size_));
DCHECK_NE(new_size, 0);
DCHECK_GT(region->size(), new_size);
// Create new region and put it to the lists after the |region|.
bool used = region->is_used();
Region* new_region =
new Region(region->begin() + new_size, region->size() - new_size, used);
if (!used) {
// Remove region from the free list before updating its size.
FreeListRemoveRegion(region);
}
region->set_size(new_size);
all_regions_.insert(new_region);
if (!used) {
FreeListAddRegion(region);
FreeListAddRegion(new_region);
}
return new_region;
}
void RegionAllocator::Merge(AllRegionsSet::iterator prev_iter,
AllRegionsSet::iterator next_iter) {
Region* prev = *prev_iter;
Region* next = *next_iter;
DCHECK_EQ(prev->end(), next->begin());
prev->set_size(prev->size() + next->size());
all_regions_.erase(next_iter); // prev_iter stays valid.
// The |next| region must already not be in the free list.
DCHECK_EQ(free_regions_.find(next), free_regions_.end());
delete next;
}
RegionAllocator::Address RegionAllocator::AllocateRegion(size_t size) {
DCHECK_NE(size, 0);
DCHECK(IsAligned(size, page_size_));
Region* region = FreeListFindRegion(size);
if (region == nullptr) return kAllocationFailure;
if (region->size() != size) {
Split(region, size);
}
DCHECK(IsAligned(region->begin(), page_size_));
DCHECK_EQ(region->size(), size);
// Mark region as used.
FreeListRemoveRegion(region);
region->set_is_used(true);
return region->begin();
}
RegionAllocator::Address RegionAllocator::AllocateRegion(
RandomNumberGenerator* rng, size_t size) {
if (free_size() >= max_load_for_randomization_) {
// There is enough free space for trying to randomize the address.
size_t random = 0;
for (int i = 0; i < kMaxRandomizationAttempts; i++) {
rng->NextBytes(&random, sizeof(random));
size_t random_offset = page_size_ * (random % region_size_in_pages_);
Address address = begin() + random_offset;
if (AllocateRegionAt(address, size)) {
return address;
}
}
// Fall back to free list allocation.
}
return AllocateRegion(size);
}
bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) {
DCHECK(IsAligned(requested_address, page_size_));
DCHECK_NE(size, 0);
DCHECK(IsAligned(size, page_size_));
Address requested_end = requested_address + size;
DCHECK_LE(requested_end, end());
Region* region;
{
AllRegionsSet::iterator region_iter = FindRegion(requested_address);
if (region_iter == all_regions_.end()) {
return false;
}
region = *region_iter;
}
if (region->is_used() || region->end() < requested_end) {
return false;
}
// Found free region that includes the requested one.
if (region->begin() != requested_address) {
// Split the region at the |requested_address| boundary.
size_t new_size = requested_address - region->begin();
DCHECK(IsAligned(new_size, page_size_));
region = Split(region, new_size);
}
if (region->end() != requested_end) {
// Split the region at the |requested_end| boundary.
Split(region, size);
}
DCHECK_EQ(region->begin(), requested_address);
DCHECK_EQ(region->size(), size);
// Mark region as used.
FreeListRemoveRegion(region);
region->set_is_used(true);
return true;
}
size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
DCHECK(IsAligned(new_size, page_size_));
AllRegionsSet::iterator region_iter = FindRegion(address);
if (region_iter == all_regions_.end()) {
return 0;
}
Region* region = *region_iter;
if (region->begin() != address || !region->is_used()) {
return 0;
}
// The region must not be in the free list.
DCHECK_EQ(free_regions_.find(*region_iter), free_regions_.end());
if (new_size > 0) {
region = Split(region, new_size);
++region_iter;
}
size_t size = region->size();
region->set_is_used(false);
// Merge current region with the surrounding ones if they are free.
if (region->end() != whole_region_.end()) {
// There must be a range after the current one.
AllRegionsSet::iterator next_iter = std::next(region_iter);
DCHECK_NE(next_iter, all_regions_.end());
if (!(*next_iter)->is_used()) {
// |next| region object will be deleted during merge, remove it from
// the free list.
FreeListRemoveRegion(*next_iter);
Merge(region_iter, next_iter);
}
}
if (new_size == 0 && region->begin() != whole_region_.begin()) {
// There must be a range before the current one.
AllRegionsSet::iterator prev_iter = std::prev(region_iter);
DCHECK_NE(prev_iter, all_regions_.end());
if (!(*prev_iter)->is_used()) {
// |prev| region's size will change, we'll have to re-insert it into
// the proper place of the free list.
FreeListRemoveRegion(*prev_iter);
Merge(prev_iter, region_iter);
// |prev| region becomes the current region.
region_iter = prev_iter;
region = *region_iter;
}
}
FreeListAddRegion(region);
return size;
}
size_t RegionAllocator::CheckRegion(Address address) {
AllRegionsSet::iterator region_iter = FindRegion(address);
if (region_iter == all_regions_.end()) {
return 0;
}
Region* region = *region_iter;
if (region->begin() != address || !region->is_used()) {
return 0;
}
return region->size();
}
void RegionAllocator::Region::Print(std::ostream& os) const {
std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
os << "[" << begin() << ", " << end() << "), size: " << size();
os << ", " << (is_used() ? "used" : "free");
os.flags(flags);
}
void RegionAllocator::Print(std::ostream& os) const {
std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase);
os << "RegionAllocator: [" << begin() << ", " << end() << ")";
os << "\nsize: " << size();
os << "\nfree_size: " << free_size();
os << "\npage_size: " << page_size_;
os << "\nall regions: ";
for (const Region* region : all_regions_) {
os << "\n ";
region->Print(os);
}
os << "\nfree regions: ";
for (const Region* region : free_regions_) {
os << "\n ";
region->Print(os);
}
os << "\n";
os.flags(flags);
}
} // namespace base
} // namespace v8

164
deps/v8/src/base/region-allocator.h vendored Normal file
View File

@ -0,0 +1,164 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_REGION_ALLOCATOR_H_
#define V8_BASE_REGION_ALLOCATOR_H_
#include <set>
#include "src/base/address-region.h"
#include "src/base/utils/random-number-generator.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
namespace v8 {
namespace base {
// Helper class for managing used/free regions within the
// [address, address + size) region. The minimum allocation unit is
// |page_size|. Requested allocation sizes are rounded up to |page_size|.
// The region allocation algorithm implements a best-fit with coalescing
// strategy: it tries to find the smallest suitable free region upon
// allocation and to merge a region with its neighbors upon freeing.
//
// This class does not perform any actual region reservation.
// Not thread-safe.
class V8_BASE_EXPORT RegionAllocator final {
public:
typedef uintptr_t Address;
static constexpr Address kAllocationFailure = static_cast<Address>(-1);
RegionAllocator(Address address, size_t size, size_t page_size);
~RegionAllocator();
// Allocates region of |size| (must be |page_size|-aligned). Returns
// the address of the region on success or kAllocationFailure.
Address AllocateRegion(size_t size);
// Same as above but tries to randomize the region displacement.
Address AllocateRegion(RandomNumberGenerator* rng, size_t size);
// Allocates region of |size| at |requested_address| if it's free. Both the
// address and the size must be |page_size|-aligned. On success returns
// true.
// This kind of allocation is supposed to be used during the setup phase to
// mark certain regions as used or for randomizing region displacement.
bool AllocateRegionAt(Address requested_address, size_t size);
// Frees the region at the given |address| and returns the size of the
// region. There must be a used region starting at the given address;
// otherwise nothing will be freed and 0 will be returned.
size_t FreeRegion(Address address) { return TrimRegion(address, 0); }
// Decreases the size of the previously allocated region at |address| and
// returns the freed size. |new_size| must be |page_size|-aligned and
// less than or equal to current region's size. Setting new size to zero
// frees the region.
size_t TrimRegion(Address address, size_t new_size);
// If there is a used region starting at the given address, returns its
// size; otherwise 0.
size_t CheckRegion(Address address);
Address begin() const { return whole_region_.begin(); }
Address end() const { return whole_region_.end(); }
size_t size() const { return whole_region_.size(); }
bool contains(Address address) const {
return whole_region_.contains(address);
}
bool contains(Address address, size_t size) const {
return whole_region_.contains(address, size);
}
// Total size of not yet acquired regions.
size_t free_size() const { return free_size_; }
// The alignment of the allocated region's addresses and granularity of
// the allocated region's sizes.
size_t page_size() const { return page_size_; }
void Print(std::ostream& os) const;
private:
class Region : public AddressRegion {
public:
Region(Address address, size_t size, bool is_used)
: AddressRegion(address, size), is_used_(is_used) {}
bool is_used() const { return is_used_; }
void set_is_used(bool used) { is_used_ = used; }
void Print(std::ostream& os) const;
private:
bool is_used_;
};
// The whole region.
const Region whole_region_;
// Number of |page_size_| pages in the whole region.
const size_t region_size_in_pages_;
// If the free size is less than this value, stop trying to randomize the
// allocation addresses.
const size_t max_load_for_randomization_;
// Size of all free regions.
size_t free_size_;
// Minimum region size. Must be a power of 2.
const size_t page_size_;
struct AddressEndOrder {
bool operator()(const Region* a, const Region* b) const {
return a->end() < b->end();
}
};
// All regions ordered by addresses.
typedef std::set<Region*, AddressEndOrder> AllRegionsSet;
AllRegionsSet all_regions_;
struct SizeAddressOrder {
bool operator()(const Region* a, const Region* b) const {
if (a->size() != b->size()) return a->size() < b->size();
return a->begin() < b->begin();
}
};
// Free regions ordered by sizes and addresses.
std::set<Region*, SizeAddressOrder> free_regions_;
// Returns an iterator to the region containing the given address, or
// all_regions_.end() if the address is outside of the managed region.
AllRegionsSet::iterator FindRegion(Address address);
// Adds given region to the set of free regions.
void FreeListAddRegion(Region* region);
// Finds best-fit free region for given size.
Region* FreeListFindRegion(size_t size);
// Removes given region from the set of free regions.
void FreeListRemoveRegion(Region* region);
// Splits given |region| into two: one of |new_size| size and a new one
// having the rest. The new region is returned.
Region* Split(Region* region, size_t new_size);
// For two adjacent regions, merges |next| into |prev| and deletes |next|.
void Merge(AllRegionsSet::iterator prev_iter,
AllRegionsSet::iterator next_iter);
FRIEND_TEST(RegionAllocatorTest, AllocateRegionRandom);
FRIEND_TEST(RegionAllocatorTest, Fragmentation);
FRIEND_TEST(RegionAllocatorTest, FindRegion);
FRIEND_TEST(RegionAllocatorTest, Contains);
DISALLOW_COPY_AND_ASSIGN(RegionAllocator);
};
} // namespace base
} // namespace v8
#endif // V8_BASE_REGION_ALLOCATOR_H_
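A short usage sketch (not part of the diff): the allocator only does bookkeeping, no real memory is reserved, so plain integer addresses work. The base address, range size and page size below are illustrative.

```cpp
// Sketch: manage a 4 MiB range in 4 KiB pages; allocate, shrink, free.
#include "src/base/logging.h"
#include "src/base/region-allocator.h"

void Example() {
  using RA = v8::base::RegionAllocator;
  constexpr size_t kPageSize = 4096;
  RA allocator(0x10000000, 1024 * kPageSize, kPageSize);
  RA::Address a = allocator.AllocateRegion(16 * kPageSize);
  CHECK_NE(RA::kAllocationFailure, a);
  // Shrink to 4 pages; the trailing 12 pages coalesce back into the
  // free list.
  allocator.TrimRegion(a, 4 * kPageSize);
  CHECK_EQ(4 * kPageSize, allocator.CheckRegion(a));
  CHECK_EQ(4 * kPageSize, allocator.FreeRegion(a));
}
```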

View File

@ -49,7 +49,7 @@ class CheckedNumeric {
public:
typedef T type;
CheckedNumeric() {}
CheckedNumeric() = default;
// Copy constructor.
template <typename Src>

267
deps/v8/src/base/threaded-list.h vendored Normal file
View File

@ -0,0 +1,267 @@
// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASE_THREADED_LIST_H_
#define V8_BASE_THREADED_LIST_H_
#include <iterator>
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
namespace v8 {
namespace base {
template <typename T>
struct ThreadedListTraits {
static T** next(T* t) { return t->next(); }
};
// Represents a linked list that threads through its nodes. Entries in the
// list are pointers to nodes. By default, nodes need to have a T** next()
// method that returns the location where the next value is stored.
// The default can be overridden by providing a custom ThreadedListTraits
// class.
template <typename T, typename BaseClass,
typename TLTraits = ThreadedListTraits<T>>
class ThreadedListBase final : public BaseClass {
public:
ThreadedListBase() : head_(nullptr), tail_(&head_) {}
void Add(T* v) {
DCHECK_NULL(*tail_);
DCHECK_NULL(*TLTraits::next(v));
*tail_ = v;
tail_ = TLTraits::next(v);
}
void AddFront(T* v) {
DCHECK_NULL(*TLTraits::next(v));
DCHECK_NOT_NULL(v);
T** const next = TLTraits::next(v);
*next = head_;
if (head_ == nullptr) tail_ = next;
head_ = v;
}
// Reinitializes the head to a new node; this costs O(n).
void ReinitializeHead(T* v) {
head_ = v;
T* current = v;
if (current != nullptr) { // Find tail
T* tmp;
while ((tmp = *TLTraits::next(current))) {
current = tmp;
}
tail_ = TLTraits::next(current);
} else {
tail_ = &head_;
}
}
void DropHead() {
DCHECK_NOT_NULL(head_);
T* old_head = head_;
head_ = *TLTraits::next(head_);
if (head_ == nullptr) tail_ = &head_;
*TLTraits::next(old_head) = nullptr;
}
void Append(ThreadedListBase&& list) {
*tail_ = list.head_;
tail_ = list.tail_;
list.Clear();
}
void Prepend(ThreadedListBase&& list) {
if (list.head_ == nullptr) return;
T* new_head = list.head_;
*list.tail_ = head_;
if (head_ == nullptr) {
tail_ = list.tail_;
}
head_ = new_head;
list.Clear();
}
void Clear() {
head_ = nullptr;
tail_ = &head_;
}
ThreadedListBase& operator=(ThreadedListBase&& other) V8_NOEXCEPT {
head_ = other.head_;
tail_ = other.head_ ? other.tail_ : &head_;
#ifdef DEBUG
other.Clear();
#endif
return *this;
}
ThreadedListBase(ThreadedListBase&& other) V8_NOEXCEPT
: head_(other.head_),
tail_(other.head_ ? other.tail_ : &head_) {
#ifdef DEBUG
other.Clear();
#endif
}
bool Remove(T* v) {
T* current = first();
if (current == v) {
DropHead();
return true;
}
while (current != nullptr) {
T* next = *TLTraits::next(current);
if (next == v) {
*TLTraits::next(current) = *TLTraits::next(next);
*TLTraits::next(next) = nullptr;
if (TLTraits::next(next) == tail_) {
tail_ = TLTraits::next(current);
}
return true;
}
current = next;
}
return false;
}
class Iterator final {
public:
using iterator_category = std::forward_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = T*;
using reference = value_type;
using pointer = value_type*;
public:
Iterator& operator++() {
entry_ = TLTraits::next(*entry_);
return *this;
}
bool operator==(const Iterator& other) const {
return entry_ == other.entry_;
}
bool operator!=(const Iterator& other) const {
return entry_ != other.entry_;
}
T* operator*() { return *entry_; }
T* operator->() { return *entry_; }
Iterator& operator=(T* entry) {
T* next = *TLTraits::next(*entry_);
*TLTraits::next(entry) = next;
*entry_ = entry;
return *this;
}
private:
explicit Iterator(T** entry) : entry_(entry) {}
T** entry_;
friend class ThreadedListBase;
};
class ConstIterator final {
public:
using iterator_category = std::forward_iterator_tag;
using difference_type = std::ptrdiff_t;
using value_type = T*;
using reference = const value_type;
using pointer = const value_type*;
public:
ConstIterator& operator++() {
entry_ = TLTraits::next(*entry_);
return *this;
}
bool operator==(const ConstIterator& other) const {
return entry_ == other.entry_;
}
bool operator!=(const ConstIterator& other) const {
return entry_ != other.entry_;
}
const T* operator*() const { return *entry_; }
private:
explicit ConstIterator(T* const* entry) : entry_(entry) {}
T* const* entry_;
friend class ThreadedListBase;
};
Iterator begin() { return Iterator(&head_); }
Iterator end() { return Iterator(tail_); }
ConstIterator begin() const { return ConstIterator(&head_); }
ConstIterator end() const { return ConstIterator(tail_); }
// Rewinds the list's tail to the reset point, i.e., cutting off the rest of
// the list, including the reset_point.
void Rewind(Iterator reset_point) {
tail_ = reset_point.entry_;
*tail_ = nullptr;
}
// Moves the tail of the from_list, starting at the from_location, to the end
// of this list.
void MoveTail(ThreadedListBase* from_list, Iterator from_location) {
if (from_list->end() != from_location) {
DCHECK_NULL(*tail_);
*tail_ = *from_location;
tail_ = from_list->tail_;
from_list->Rewind(from_location);
}
}
bool is_empty() const { return head_ == nullptr; }
T* first() const { return head_; }
// Slow. For testing purposes.
int LengthForTest() {
int result = 0;
for (Iterator t = begin(); t != end(); ++t) ++result;
return result;
}
T* AtForTest(int i) {
Iterator t = begin();
while (i-- > 0) ++t;
return *t;
}
bool Verify() {
T* last = this->first();
if (last == nullptr) {
CHECK_EQ(&head_, tail_);
} else {
while (*TLTraits::next(last) != nullptr) {
last = *TLTraits::next(last);
}
CHECK_EQ(TLTraits::next(last), tail_);
}
return true;
}
private:
T* head_;
T** tail_;
DISALLOW_COPY_AND_ASSIGN(ThreadedListBase);
};
struct EmptyBase {};
template <typename T, typename TLTraits = ThreadedListTraits<T>>
using ThreadedList = ThreadedListBase<T, EmptyBase, TLTraits>;
} // namespace base
} // namespace v8
#endif // V8_BASE_THREADED_LIST_H_
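To make the traits contract concrete, a minimal node type and usage sketch (not part of the diff); nodes must outlive the list since the list threads through them rather than owning them.

```cpp
// Sketch: a node needs storage for the threaded pointer plus a next()
// accessor returning that storage's address.
#include "src/base/threaded-list.h"

struct Node {
  explicit Node(int value) : value(value) {}
  Node** next() { return &next_; }  // location of the threaded pointer
  int value;
  Node* next_ = nullptr;
};

void Example() {
  v8::base::ThreadedList<Node> list;
  Node a(1), b(2);
  list.Add(&a);  // appends in O(1) via the cached tail_ pointer
  list.Add(&b);
  int sum = 0;
  for (Node* n : list) sum += n->value;  // visits a, then b
  (void)sum;
  list.Remove(&a);
}
```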

Some files were not shown because too many files have changed in this diff.