deps: update V8 to 5.7.492.69

PR-URL: https://github.com/nodejs/node/pull/11752
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Franziska Hinkelmann <franziska.hinkelmann@gmail.com>
Author: Michaël Zasso
Date: 2017-03-21 10:16:54 +01:00
Parent: e0bc5a7361
Commit: c459d8ea5d
1726 changed files with 101053 additions and 86648 deletions


deps/v8/.clang-format vendored

@@ -1,4 +1,5 @@
 # Defines the Google C++ style for automatic reformatting.
 # http://clang.llvm.org/docs/ClangFormatStyleOptions.html
 BasedOnStyle: Google
+DerivePointerAlignment: false
 MaxEmptyLinesToKeep: 1

31
deps/v8/.gn vendored

@@ -2,6 +2,8 @@
 # tree and to set startup options. For documentation on the values set in this
 # file, run "gn help dotfile" at the command line.

+import("//build/dotfile_settings.gni")
+
 # The location of the build configuration file.
 buildconfig = "//build/config/BUILDCONFIG.gn"
@@ -19,30 +21,5 @@ check_targets = []
 # These are the list of GN files that run exec_script. This whitelist exists
 # to force additional review for new uses of exec_script, which is strongly
 # discouraged except for gypi_to_gn calls.
-exec_script_whitelist = [
-  "//build/config/android/BUILD.gn",
-  "//build/config/android/config.gni",
-  "//build/config/android/internal_rules.gni",
-  "//build/config/android/rules.gni",
-  "//build/config/BUILD.gn",
-  "//build/config/compiler/BUILD.gn",
-  "//build/config/gcc/gcc_version.gni",
-  "//build/config/ios/ios_sdk.gni",
-  "//build/config/linux/atk/BUILD.gn",
-  "//build/config/linux/BUILD.gn",
-  "//build/config/linux/pkg_config.gni",
-  "//build/config/mac/mac_sdk.gni",
-  "//build/config/posix/BUILD.gn",
-  "//build/config/sysroot.gni",
-  "//build/config/win/BUILD.gn",
-  "//build/config/win/visual_studio_version.gni",
-  "//build/gn_helpers.py",
-  "//build/gypi_to_gn.py",
-  "//build/toolchain/concurrent_links.gni",
-  "//build/toolchain/gcc_toolchain.gni",
-  "//build/toolchain/mac/BUILD.gn",
-  "//build/toolchain/win/BUILD.gn",
-  "//build/util/branding.gni",
-  "//build/util/version.gni",
-  "//test/test262/BUILD.gn",
-]
+exec_script_whitelist =
+    build_dotfile_settings.exec_script_whitelist + [ "//test/test262/BUILD.gn" ]

2
deps/v8/AUTHORS vendored

@@ -81,6 +81,7 @@ Julien Brianceau <jbriance@cisco.com>
 JunHo Seo <sejunho@gmail.com>
 Kang-Hao (Kenny) Lu <kennyluck@csail.mit.edu>
 Karl Skomski <karl@skomski.com>
+Kevin Gibbons <bakkot@gmail.com>
 Luis Reis <luis.m.reis@gmail.com>
 Luke Zarko <lukezarko@gmail.com>
 Maciej Małecki <me@mmalecki.com>
@@ -104,6 +105,7 @@ Patrick Gansterer <paroga@paroga.com>
 Peter Rybin <peter.rybin@gmail.com>
 Peter Varga <pvarga@inf.u-szeged.hu>
 Paul Lind <plind44@gmail.com>
+Qiuyi Zhang <qiuyi.zqy@alibaba-inc.com>
 Rafal Krypa <rafal@krypa.net>
 Refael Ackermann <refack@gmail.com>
 Rene Rebe <rene@exactcode.de>

197
deps/v8/BUILD.gn vendored

@@ -14,8 +14,6 @@ if (is_android) {
 import("gni/v8.gni")
 import("gni/isolate.gni")
-import("//build_overrides/v8.gni")
-
 import("snapshot_toolchain.gni")

 declare_args() {
@@ -23,7 +21,10 @@ declare_args() {
   v8_android_log_stdout = false

   # Sets -DVERIFY_HEAP.
-  v8_enable_verify_heap = false
+  v8_enable_verify_heap = ""
+
+  # Sets -DVERIFY_PREDICTABLE
+  v8_enable_verify_predictable = false

   # Enable compiler warnings when using V8_DEPRECATED apis.
   v8_deprecation_warnings = false
@@ -51,7 +52,13 @@ declare_args() {
   v8_interpreted_regexp = false

   # Sets -dOBJECT_PRINT.
-  v8_object_print = ""
+  v8_enable_object_print = ""
+
+  # Sets -dTRACE_MAPS.
+  v8_enable_trace_maps = ""
+
+  # Sets -dV8_ENABLE_CHECKS.
+  v8_enable_v8_checks = ""

   # With post mortem support enabled, metadata is embedded into libv8 that
   # describes various parameters of the VM for use by debuggers. See
@@ -89,11 +96,20 @@ if (v8_enable_gdbjit == "") {
 }

 # Derived defaults.
-if (v8_object_print == "") {
-  v8_object_print = is_debug && !v8_optimized_debug
+if (v8_enable_verify_heap == "") {
+  v8_enable_verify_heap = is_debug
+}
+if (v8_enable_object_print == "") {
+  v8_enable_object_print = is_debug
 }
 if (v8_enable_disassembler == "") {
-  v8_enable_disassembler = is_debug && !v8_optimized_debug
+  v8_enable_disassembler = is_debug
+}
+if (v8_enable_trace_maps == "") {
+  v8_enable_trace_maps = is_debug
+}
+if (v8_enable_v8_checks == "") {
+  v8_enable_v8_checks = is_debug
 }

 # Specifies if the target build is a simulator build. Comparing target cpu
@@ -155,7 +171,7 @@ config("external_config") {
     defines = [ "USING_V8_SHARED" ]
   }
   include_dirs = [ "include" ]
-  if (v8_enable_inspector_override) {
+  if (v8_enable_inspector) {
     include_dirs += [ "$target_gen_dir/include" ]
   }
 }
@@ -179,12 +195,21 @@ config("features") {
   if (v8_enable_gdbjit) {
     defines += [ "ENABLE_GDB_JIT_INTERFACE" ]
   }
-  if (v8_object_print) {
+  if (v8_enable_object_print) {
     defines += [ "OBJECT_PRINT" ]
   }
   if (v8_enable_verify_heap) {
     defines += [ "VERIFY_HEAP" ]
   }
+  if (v8_enable_verify_predictable) {
+    defines += [ "VERIFY_PREDICTABLE" ]
+  }
+  if (v8_enable_trace_maps) {
+    defines += [ "TRACE_MAPS" ]
+  }
+  if (v8_enable_v8_checks) {
+    defines += [ "V8_ENABLE_CHECKS" ]
+  }
   if (v8_interpreted_regexp) {
     defines += [ "V8_INTERPRETED_REGEXP" ]
   }
@@ -348,15 +373,7 @@ config("toolchain") {
     ldflags += [ "-rdynamic" ]
   }

-  # TODO(jochen): Add support for different debug optimization levels.
-  defines += [
-    "ENABLE_DISASSEMBLER",
-    "V8_ENABLE_CHECKS",
-    "OBJECT_PRINT",
-    "VERIFY_HEAP",
-    "DEBUG",
-    "TRACE_MAPS",
-  ]
+  defines += [ "DEBUG" ]
   if (v8_enable_slow_dchecks) {
     defines += [ "ENABLE_SLOW_DCHECKS" ]
   }
@@ -408,7 +425,6 @@ action("js2c") {
     "src/js/prologue.js",
     "src/js/runtime.js",
     "src/js/v8natives.js",
-    "src/js/symbol.js",
     "src/js/array.js",
     "src/js/string.js",
     "src/js/arraybuffer.js",
@@ -422,6 +438,7 @@ action("js2c") {
     "src/js/spread.js",
     "src/js/proxy.js",
     "src/js/async-await.js",
+    "src/js/harmony-string-padding.js",
     "src/debug/mirrors.js",
     "src/debug/debug.js",
     "src/debug/liveedit.js",
@@ -466,7 +483,6 @@ action("js2c_experimental") {
     "src/messages.h",
     "src/js/harmony-atomics.js",
     "src/js/harmony-simd.js",
-    "src/js/harmony-string-padding.js",
   ]

   outputs = [
@@ -742,7 +758,7 @@ action("v8_dump_build_config") {
     "is_tsan=$is_tsan",
     "target_cpu=\"$target_cpu\"",
     "v8_enable_i18n_support=$v8_enable_i18n_support",
-    "v8_enable_inspector=$v8_enable_inspector_override",
+    "v8_enable_inspector=$v8_enable_inspector",
     "v8_target_cpu=\"$v8_target_cpu\"",
     "v8_use_snapshot=$v8_use_snapshot",
   ]
@@ -848,6 +864,17 @@ if (v8_use_external_startup_data) {
   }
 }

+# This is split out to be a non-code containing target that the Chromium browser
+# DLL can depend upon to get only a version string.
+v8_source_set("v8_version") {
+  configs = [ ":internal_config" ]
+  sources = [
+    "include/v8-version-string.h",
+    "include/v8-version.h",
+  ]
+}
+
 v8_source_set("v8_base") {
   visibility = [ ":*" ]  # Only targets in this file can depend on this.
@@ -861,7 +888,6 @@ v8_source_set("v8_base") {
     "include/v8-profiler.h",
     "include/v8-testing.h",
     "include/v8-util.h",
-    "include/v8-version.h",
     "include/v8.h",
     "include/v8config.h",
     "src/accessors.cc",
@@ -893,12 +919,15 @@ v8_source_set("v8_base") {
     "src/asmjs/asm-wasm-builder.h",
     "src/asmjs/switch-logic.cc",
     "src/asmjs/switch-logic.h",
+    "src/assembler-inl.h",
     "src/assembler.cc",
     "src/assembler.h",
     "src/assert-scope.cc",
     "src/assert-scope.h",
     "src/ast/ast-expression-rewriter.cc",
     "src/ast/ast-expression-rewriter.h",
+    "src/ast/ast-function-literal-id-reindexer.cc",
+    "src/ast/ast-function-literal-id-reindexer.h",
     "src/ast/ast-literal-reindexer.cc",
     "src/ast/ast-literal-reindexer.h",
     "src/ast/ast-numbering.cc",
@@ -919,7 +948,6 @@ v8_source_set("v8_base") {
     "src/ast/modules.h",
     "src/ast/prettyprinter.cc",
     "src/ast/prettyprinter.h",
-    "src/ast/scopeinfo.cc",
     "src/ast/scopes.cc",
     "src/ast/scopes.h",
     "src/ast/variables.cc",
@@ -944,6 +972,8 @@ v8_source_set("v8_base") {
     "src/builtins/builtins-boolean.cc",
     "src/builtins/builtins-call.cc",
     "src/builtins/builtins-callsite.cc",
+    "src/builtins/builtins-constructor.cc",
+    "src/builtins/builtins-constructor.h",
     "src/builtins/builtins-conversion.cc",
     "src/builtins/builtins-dataview.cc",
     "src/builtins/builtins-date.cc",
@@ -953,14 +983,15 @@ v8_source_set("v8_base") {
     "src/builtins/builtins-generator.cc",
     "src/builtins/builtins-global.cc",
     "src/builtins/builtins-handler.cc",
+    "src/builtins/builtins-ic.cc",
     "src/builtins/builtins-internal.cc",
     "src/builtins/builtins-interpreter.cc",
-    "src/builtins/builtins-iterator.cc",
     "src/builtins/builtins-json.cc",
     "src/builtins/builtins-math.cc",
     "src/builtins/builtins-number.cc",
     "src/builtins/builtins-object.cc",
     "src/builtins/builtins-promise.cc",
+    "src/builtins/builtins-promise.h",
     "src/builtins/builtins-proxy.cc",
     "src/builtins/builtins-reflect.cc",
     "src/builtins/builtins-regexp.cc",
@@ -1002,6 +1033,8 @@ v8_source_set("v8_base") {
     "src/compiler-dispatcher/compiler-dispatcher-job.h",
     "src/compiler-dispatcher/compiler-dispatcher-tracer.cc",
     "src/compiler-dispatcher/compiler-dispatcher-tracer.h",
+    "src/compiler-dispatcher/compiler-dispatcher.cc",
+    "src/compiler-dispatcher/compiler-dispatcher.h",
     "src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
     "src/compiler-dispatcher/optimizing-compile-dispatcher.h",
     "src/compiler.cc",
@@ -1020,12 +1053,12 @@ v8_source_set("v8_base") {
     "src/compiler/basic-block-instrumentor.h",
     "src/compiler/branch-elimination.cc",
     "src/compiler/branch-elimination.h",
-    "src/compiler/bytecode-branch-analysis.cc",
-    "src/compiler/bytecode-branch-analysis.h",
+    "src/compiler/bytecode-analysis.cc",
+    "src/compiler/bytecode-analysis.h",
     "src/compiler/bytecode-graph-builder.cc",
     "src/compiler/bytecode-graph-builder.h",
-    "src/compiler/bytecode-loop-analysis.cc",
-    "src/compiler/bytecode-loop-analysis.h",
+    "src/compiler/bytecode-liveness-map.cc",
+    "src/compiler/bytecode-liveness-map.h",
     "src/compiler/c-linkage.cc",
     "src/compiler/checkpoint-elimination.cc",
     "src/compiler/checkpoint-elimination.h",
@@ -1065,6 +1098,8 @@ v8_source_set("v8_base") {
     "src/compiler/frame.h",
     "src/compiler/gap-resolver.cc",
     "src/compiler/gap-resolver.h",
+    "src/compiler/graph-assembler.cc",
+    "src/compiler/graph-assembler.h",
     "src/compiler/graph-reducer.cc",
     "src/compiler/graph-reducer.h",
     "src/compiler/graph-replay.cc",
@@ -1196,8 +1231,6 @@ v8_source_set("v8_base") {
     "src/compiler/tail-call-optimization.h",
     "src/compiler/type-cache.cc",
     "src/compiler/type-cache.h",
-    "src/compiler/type-hint-analyzer.cc",
-    "src/compiler/type-hint-analyzer.h",
     "src/compiler/typed-optimization.cc",
     "src/compiler/typed-optimization.h",
     "src/compiler/typer.cc",
@@ -1300,6 +1333,7 @@ v8_source_set("v8_base") {
     "src/debug/debug-scopes.h",
     "src/debug/debug.cc",
     "src/debug/debug.h",
+    "src/debug/interface-types.h",
     "src/debug/liveedit.cc",
     "src/debug/liveedit.h",
     "src/deoptimize-reason.cc",
@@ -1373,6 +1407,8 @@ v8_source_set("v8_base") {
     "src/heap/array-buffer-tracker.h",
     "src/heap/code-stats.cc",
     "src/heap/code-stats.h",
+    "src/heap/embedder-tracing.cc",
+    "src/heap/embedder-tracing.h",
     "src/heap/gc-idle-time-handler.cc",
     "src/heap/gc-idle-time-handler.h",
     "src/heap/gc-tracer.cc",
@@ -1414,6 +1450,9 @@ v8_source_set("v8_base") {
     "src/ic/access-compiler-data.h",
     "src/ic/access-compiler.cc",
     "src/ic/access-compiler.h",
+    "src/ic/accessor-assembler-impl.h",
+    "src/ic/accessor-assembler.cc",
+    "src/ic/accessor-assembler.h",
     "src/ic/call-optimization.cc",
     "src/ic/call-optimization.h",
     "src/ic/handler-compiler.cc",
@@ -1425,6 +1464,8 @@ v8_source_set("v8_base") {
     "src/ic/ic-inl.h",
     "src/ic/ic-state.cc",
     "src/ic/ic-state.h",
+    "src/ic/ic-stats.cc",
+    "src/ic/ic-stats.h",
     "src/ic/ic.cc",
     "src/ic/ic.h",
     "src/ic/keyed-store-generic.cc",
@@ -1437,10 +1478,14 @@ v8_source_set("v8_base") {
     "src/identity-map.h",
     "src/interface-descriptors.cc",
     "src/interface-descriptors.h",
+    "src/interpreter/bytecode-array-accessor.cc",
+    "src/interpreter/bytecode-array-accessor.h",
     "src/interpreter/bytecode-array-builder.cc",
     "src/interpreter/bytecode-array-builder.h",
     "src/interpreter/bytecode-array-iterator.cc",
     "src/interpreter/bytecode-array-iterator.h",
+    "src/interpreter/bytecode-array-random-iterator.cc",
+    "src/interpreter/bytecode-array-random-iterator.h",
     "src/interpreter/bytecode-array-writer.cc",
     "src/interpreter/bytecode-array-writer.h",
     "src/interpreter/bytecode-dead-code-optimizer.cc",
@@ -1509,6 +1554,8 @@ v8_source_set("v8_base") {
     "src/machine-type.cc",
     "src/machine-type.h",
     "src/macro-assembler.h",
+    "src/map-updater.cc",
+    "src/map-updater.h",
     "src/messages.cc",
     "src/messages.h",
     "src/msan.h",
@@ -1519,6 +1566,11 @@ v8_source_set("v8_base") {
     "src/objects-printer.cc",
     "src/objects.cc",
     "src/objects.h",
+    "src/objects/module-info.h",
+    "src/objects/object-macros-undef.h",
+    "src/objects/object-macros.h",
+    "src/objects/scope-info.cc",
+    "src/objects/scope-info.h",
     "src/ostreams.cc",
     "src/ostreams.h",
     "src/parsing/duplicate-finder.cc",
@@ -1533,6 +1585,8 @@ v8_source_set("v8_base") {
     "src/parsing/parser-base.h",
     "src/parsing/parser.cc",
     "src/parsing/parser.h",
+    "src/parsing/parsing.cc",
+    "src/parsing/parsing.h",
     "src/parsing/pattern-rewriter.cc",
     "src/parsing/preparse-data-format.h",
     "src/parsing/preparse-data.cc",
@@ -1578,8 +1632,6 @@ v8_source_set("v8_base") {
     "src/profiler/tracing-cpu-profiler.h",
     "src/profiler/unbound-queue-inl.h",
     "src/profiler/unbound-queue.h",
-    "src/promise-utils.cc",
-    "src/promise-utils.h",
     "src/property-descriptor.cc",
     "src/property-descriptor.h",
     "src/property-details.h",
@@ -1679,6 +1731,8 @@ v8_source_set("v8_base") {
     "src/startup-data-util.h",
     "src/string-builder.cc",
     "src/string-builder.h",
+    "src/string-case.cc",
+    "src/string-case.h",
     "src/string-search.h",
     "src/string-stream.cc",
     "src/string-stream.h",
@@ -1693,6 +1747,7 @@ v8_source_set("v8_base") {
     "src/transitions-inl.h",
     "src/transitions.cc",
     "src/transitions.h",
+    "src/trap-handler/trap-handler.h",
     "src/type-feedback-vector-inl.h",
     "src/type-feedback-vector.cc",
     "src/type-feedback-vector.h",
@@ -1724,9 +1779,9 @@ v8_source_set("v8_base") {
     "src/version.h",
     "src/vm-state-inl.h",
     "src/vm-state.h",
-    "src/wasm/ast-decoder.cc",
-    "src/wasm/ast-decoder.h",
     "src/wasm/decoder.h",
+    "src/wasm/function-body-decoder.cc",
+    "src/wasm/function-body-decoder.h",
     "src/wasm/leb-helper.h",
     "src/wasm/managed.h",
     "src/wasm/module-decoder.cc",
@@ -1740,6 +1795,7 @@ v8_source_set("v8_base") {
     "src/wasm/wasm-interpreter.h",
     "src/wasm/wasm-js.cc",
     "src/wasm/wasm-js.h",
+    "src/wasm/wasm-limits.h",
     "src/wasm/wasm-macro-gen.h",
     "src/wasm/wasm-module-builder.cc",
     "src/wasm/wasm-module-builder.h",
@@ -1751,12 +1807,15 @@ v8_source_set("v8_base") {
     "src/wasm/wasm-opcodes.h",
     "src/wasm/wasm-result.cc",
     "src/wasm/wasm-result.h",
+    "src/wasm/wasm-text.cc",
+    "src/wasm/wasm-text.h",
     "src/zone/accounting-allocator.cc",
     "src/zone/accounting-allocator.h",
     "src/zone/zone-allocator.h",
     "src/zone/zone-chunk-list.h",
     "src/zone/zone-containers.h",
+    "src/zone/zone-handle-set.h",
     "src/zone/zone-segment.cc",
     "src/zone/zone-segment.h",
     "src/zone/zone.cc",
@@ -1797,9 +1856,7 @@ v8_source_set("v8_base") {
     "src/ia32/simulator-ia32.h",
     "src/ic/ia32/access-compiler-ia32.cc",
     "src/ic/ia32/handler-compiler-ia32.cc",
-    "src/ic/ia32/ic-compiler-ia32.cc",
     "src/ic/ia32/ic-ia32.cc",
-    "src/ic/ia32/stub-cache-ia32.cc",
     "src/regexp/ia32/regexp-macro-assembler-ia32.cc",
     "src/regexp/ia32/regexp-macro-assembler-ia32.h",
   ]
@@ -1822,9 +1879,7 @@ v8_source_set("v8_base") {
     "src/full-codegen/x64/full-codegen-x64.cc",
     "src/ic/x64/access-compiler-x64.cc",
     "src/ic/x64/handler-compiler-x64.cc",
-    "src/ic/x64/ic-compiler-x64.cc",
     "src/ic/x64/ic-x64.cc",
-    "src/ic/x64/stub-cache-x64.cc",
     "src/regexp/x64/regexp-macro-assembler-x64.cc",
     "src/regexp/x64/regexp-macro-assembler-x64.h",
     "src/third_party/valgrind/valgrind.h",
@@ -1889,8 +1944,6 @@ v8_source_set("v8_base") {
     "src/ic/arm/access-compiler-arm.cc",
     "src/ic/arm/handler-compiler-arm.cc",
     "src/ic/arm/ic-arm.cc",
-    "src/ic/arm/ic-compiler-arm.cc",
-    "src/ic/arm/stub-cache-arm.cc",
     "src/regexp/arm/regexp-macro-assembler-arm.cc",
     "src/regexp/arm/regexp-macro-assembler-arm.h",
   ]
@@ -1948,8 +2001,6 @@ v8_source_set("v8_base") {
     "src/ic/arm64/access-compiler-arm64.cc",
     "src/ic/arm64/handler-compiler-arm64.cc",
     "src/ic/arm64/ic-arm64.cc",
-    "src/ic/arm64/ic-compiler-arm64.cc",
-    "src/ic/arm64/stub-cache-arm64.cc",
     "src/regexp/arm64/regexp-macro-assembler-arm64.cc",
     "src/regexp/arm64/regexp-macro-assembler-arm64.h",
   ]
@@ -1970,9 +2021,7 @@ v8_source_set("v8_base") {
     "src/full-codegen/mips/full-codegen-mips.cc",
     "src/ic/mips/access-compiler-mips.cc",
     "src/ic/mips/handler-compiler-mips.cc",
-    "src/ic/mips/ic-compiler-mips.cc",
     "src/ic/mips/ic-mips.cc",
-    "src/ic/mips/stub-cache-mips.cc",
     "src/mips/assembler-mips-inl.h",
     "src/mips/assembler-mips.cc",
     "src/mips/assembler-mips.h",
@@ -2012,9 +2061,7 @@ v8_source_set("v8_base") {
     "src/full-codegen/mips64/full-codegen-mips64.cc",
     "src/ic/mips64/access-compiler-mips64.cc",
     "src/ic/mips64/handler-compiler-mips64.cc",
-    "src/ic/mips64/ic-compiler-mips64.cc",
     "src/ic/mips64/ic-mips64.cc",
-    "src/ic/mips64/stub-cache-mips64.cc",
     "src/mips64/assembler-mips64-inl.h",
     "src/mips64/assembler-mips64.cc",
     "src/mips64/assembler-mips64.h",
@@ -2054,9 +2101,7 @@ v8_source_set("v8_base") {
     "src/full-codegen/ppc/full-codegen-ppc.cc",
     "src/ic/ppc/access-compiler-ppc.cc",
     "src/ic/ppc/handler-compiler-ppc.cc",
-    "src/ic/ppc/ic-compiler-ppc.cc",
     "src/ic/ppc/ic-ppc.cc",
-    "src/ic/ppc/stub-cache-ppc.cc",
     "src/ppc/assembler-ppc-inl.h",
     "src/ppc/assembler-ppc.cc",
     "src/ppc/assembler-ppc.h",
@@ -2096,9 +2141,7 @@ v8_source_set("v8_base") {
     "src/full-codegen/s390/full-codegen-s390.cc",
     "src/ic/s390/access-compiler-s390.cc",
     "src/ic/s390/handler-compiler-s390.cc",
-    "src/ic/s390/ic-compiler-s390.cc",
     "src/ic/s390/ic-s390.cc",
-    "src/ic/s390/stub-cache-s390.cc",
     "src/regexp/s390/regexp-macro-assembler-s390.cc",
     "src/regexp/s390/regexp-macro-assembler-s390.h",
     "src/s390/assembler-s390-inl.h",
@@ -2138,9 +2181,7 @@ v8_source_set("v8_base") {
     "src/full-codegen/x87/full-codegen-x87.cc",
     "src/ic/x87/access-compiler-x87.cc",
     "src/ic/x87/handler-compiler-x87.cc",
-    "src/ic/x87/ic-compiler-x87.cc",
     "src/ic/x87/ic-x87.cc",
-    "src/ic/x87/stub-cache-x87.cc",
     "src/regexp/x87/regexp-macro-assembler-x87.cc",
     "src/regexp/x87/regexp-macro-assembler-x87.h",
     "src/x87/assembler-x87-inl.h",
@@ -2169,6 +2210,7 @@ v8_source_set("v8_base") {
   deps = [
     ":v8_libbase",
     ":v8_libsampler",
+    ":v8_version",
   ]

   sources += [ v8_generated_peephole_source ]
@@ -2196,7 +2238,7 @@ v8_source_set("v8_base") {
     deps += [ ":postmortem-metadata" ]
   }

-  if (v8_enable_inspector_override) {
+  if (v8_enable_inspector) {
     deps += [ "src/inspector:inspector" ]
   }
 }
@@ -2399,14 +2441,10 @@ v8_source_set("fuzzer_support") {
     ":v8_libbase",
     ":v8_libplatform",
   ]
-}

-v8_source_set("simple_fuzzer") {
-  sources = [
-    "test/fuzzer/fuzzer.cc",
-  ]
-
-  configs = [ ":internal_config_base" ]
+  if (v8_enable_i18n_support) {
+    deps += [ "//third_party/icu" ]
+  }
 }

 ###############################################################################
@@ -2477,14 +2515,10 @@ group("gn_all") {
   deps = [
     ":d8",
+    ":v8_fuzzers",
     ":v8_hello_world",
     ":v8_parser_shell",
     ":v8_sample_process",
-    ":v8_simple_json_fuzzer",
-    ":v8_simple_parser_fuzzer",
-    ":v8_simple_regexp_fuzzer",
-    ":v8_simple_wasm_asmjs_fuzzer",
-    ":v8_simple_wasm_fuzzer",
     "test:gn_all",
     "tools:gn_all",
   ]
@@ -2498,6 +2532,26 @@ group("gn_all") {
   }
 }

+group("v8_fuzzers") {
+  testonly = true
+  deps = [
+    ":v8_simple_json_fuzzer",
+    ":v8_simple_parser_fuzzer",
+    ":v8_simple_regexp_fuzzer",
+    ":v8_simple_wasm_asmjs_fuzzer",
+    ":v8_simple_wasm_call_fuzzer",
+    ":v8_simple_wasm_code_fuzzer",
+    ":v8_simple_wasm_data_section_fuzzer",
+    ":v8_simple_wasm_function_sigs_section_fuzzer",
+    ":v8_simple_wasm_fuzzer",
+    ":v8_simple_wasm_globals_section_fuzzer",
+    ":v8_simple_wasm_imports_section_fuzzer",
+    ":v8_simple_wasm_memory_section_fuzzer",
+    ":v8_simple_wasm_names_section_fuzzer",
+    ":v8_simple_wasm_types_section_fuzzer",
+  ]
+}
+
 if (is_component_build) {
   v8_component("v8") {
     sources = [
@@ -2527,6 +2581,7 @@ if (is_component_build) {
       ":v8_base",
      ":v8_maybe_snapshot",
     ]
+
     public_configs = [ ":external_config" ]
   }
 }
@@ -2566,8 +2621,12 @@ v8_executable("d8") {
     deps += [ "//third_party/icu" ]
   }

+  if (v8_correctness_fuzzer) {
+    deps += [ "tools/foozzie:v8_correctness_fuzzer_resources" ]
+  }
+
   defines = []
-  if (v8_enable_inspector_override) {
+  if (v8_enable_inspector) {
     defines += [ "V8_INSPECTOR_ENABLED" ]
   }
 }
@@ -2687,10 +2746,14 @@ template("v8_fuzzer") {
   v8_executable("v8_simple_" + name) {
     deps = [
       ":" + name,
-      ":simple_fuzzer",
+      "//build/config/sanitizers:deps",
       "//build/win:default_exe_manifest",
     ]

+    sources = [
+      "test/fuzzer/fuzzer.cc",
+    ]
+
     configs = [ ":external_config" ]
   }
 }

2490
deps/v8/ChangeLog vendored

File diff suppressed because it is too large.

22
deps/v8/DEPS vendored

@@ -8,15 +8,15 @@ vars = {
 deps = {
   "v8/build":
-    Var("chromium_url") + "/chromium/src/build.git" + "@" + "a3b623a6eff6dc9d58a03251ae22bccf92f67cb2",
+    Var("chromium_url") + "/chromium/src/build.git" + "@" + "f55127ddc3632dbd6fef285c71ab4cb103e08f0c",
   "v8/tools/gyp":
     Var("chromium_url") + "/external/gyp.git" + "@" + "e7079f0e0e14108ab0dba58728ff219637458563",
   "v8/third_party/icu":
-    Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "c1a237113f525a1561d4b322d7653e1083f79aaa",
+    Var("chromium_url") + "/chromium/deps/icu.git" + "@" + "9cd2828740572ba6f694b9365236a8356fd06147",
   "v8/third_party/instrumented_libraries":
-    Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "45f5814b1543e41ea0be54c771e3840ea52cca4a",
+    Var("chromium_url") + "/chromium/src/third_party/instrumented_libraries.git" + "@" + "5b6f777da671be977f56f0e8fc3469a3ccbb4474",
   "v8/buildtools":
-    Var("chromium_url") + "/chromium/buildtools.git" + "@" + "39b1db2ab4aa4b2ccaa263c29bdf63e7c1ee28aa",
+    Var("chromium_url") + "/chromium/buildtools.git" + "@" + "cb12d6e8641f0c9b0fbbfa4bf17c55c6c0d3c38f",
   "v8/base/trace_event/common":
     Var("chromium_url") + "/chromium/src/base/trace_event/common.git" + "@" + "06294c8a4a6f744ef284cd63cfe54dbf61eea290",
   "v8/third_party/jinja2":
@@ -24,7 +24,7 @@ deps = {
   "v8/third_party/markupsafe":
     Var("chromium_url") + "/chromium/src/third_party/markupsafe.git" + "@" + "484a5661041cac13bfc688a26ec5434b05d18961",
   "v8/tools/swarming_client":
-    Var('chromium_url') + '/external/swarming.client.git' + '@' + "380e32662312eb107f06fcba6409b0409f8fef72",
+    Var('chromium_url') + '/external/swarming.client.git' + '@' + "ebc8dab6f8b8d79ec221c94de39a921145abd404",
   "v8/testing/gtest":
     Var("chromium_url") + "/external/github.com/google/googletest.git" + "@" + "6f8a66431cb592dad629028a50b3dd418a408c87",
   "v8/testing/gmock":
@@ -35,19 +35,19 @@ deps = {
     Var("chromium_url") + "/v8/deps/third_party/mozilla-tests.git" + "@" + "f6c578a10ea707b1a8ab0b88943fe5115ce2b9be",
   "v8/test/simdjs/data": Var("chromium_url") + "/external/github.com/tc39/ecmascript_simd.git" + "@" + "baf493985cb9ea7cdbd0d68704860a8156de9556",
   "v8/test/test262/data":
-    Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "fb61ab44eb1bbc2699d714fc00e33af2a19411ce",
+    Var("chromium_url") + "/external/github.com/tc39/test262.git" + "@" + "6a0f1189eb00d38ef9760cb65cbc41c066876cde",
   "v8/test/test262/harness":
-    Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "cbd968f54f7a95c6556d53ba852292a4c49d11d8",
+    Var("chromium_url") + "/external/github.com/test262-utils/test262-harness-py.git" + "@" + "0f2acdd882c84cff43b9d60df7574a1901e2cdcd",
   "v8/tools/clang":
-    Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "75350a858c51ad69e2aae051a8727534542da29f",
+    Var("chromium_url") + "/chromium/src/tools/clang.git" + "@" + "f7ce1a5678e5addc015aed5f1e7734bbd2caac7c",
 }

 deps_os = {
   "android": {
     "v8/third_party/android_tools":
-      Var("chromium_url") + "/android_tools.git" + "@" + "25d57ead05d3dfef26e9c19b13ed10b0a69829cf",
+      Var("chromium_url") + "/android_tools.git" + "@" + "b43a6a289a7588b1769814f04dd6c7d7176974cc",
     "v8/third_party/catapult":
-      Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "6962f5c0344a79b152bf84460a93e1b2e11ea0f4",
+      Var('chromium_url') + "/external/github.com/catapult-project/catapult.git" + "@" + "143ba4ddeb05e6165fb8413c5f3f47d342922d24",
   },
   "win": {
     "v8/third_party/cygwin":
@@ -263,7 +263,7 @@ hooks = [
     # Update the Windows toolchain if necessary.
     'name': 'win_toolchain',
     'pattern': '.',
-    'action': ['python', 'v8/gypfiles/vs_toolchain.py', 'update'],
+    'action': ['python', 'v8/build/vs_toolchain.py', 'update'],
   },
   # Pull binutils for linux, enabled debug fission for faster linking /
   # debugging when used with clang on Ubuntu Precise.

7
deps/v8/OWNERS vendored

@@ -5,14 +5,19 @@ binji@chromium.org
 bmeurer@chromium.org
 bradnelson@chromium.org
 cbruni@chromium.org
+clemensh@chromium.org
 danno@chromium.org
 epertoso@chromium.org
+franzih@chromium.org
+gsathya@chromium.org
 hablich@chromium.org
 hpayer@chromium.org
 ishell@chromium.org
 jarin@chromium.org
+jgruber@chromium.org
 jkummerow@chromium.org
 jochen@chromium.org
+leszeks@chromium.org
 littledan@chromium.org
 machenbach@chromium.org
 marja@chromium.org
@@ -21,9 +26,11 @@ mstarzinger@chromium.org
 mtrofin@chromium.org
 mvstanton@chromium.org
 mythria@chromium.org
+petermarshall@chromium.org
 neis@chromium.org
 rmcilroy@chromium.org
 rossberg@chromium.org
+tebbi@chromium.org
 titzer@chromium.org
 ulan@chromium.org
 verwaest@chromium.org

15
deps/v8/PRESUBMIT.py vendored

@@ -67,19 +67,22 @@ def _V8PresubmitChecks(input_api, output_api):
       input_api.PresubmitLocalPath(), 'tools'))
   from presubmit import CppLintProcessor
   from presubmit import SourceProcessor
-  from presubmit import CheckAuthorizedAuthor
-  from presubmit import CheckStatusFiles
+  from presubmit import StatusFilesProcessor

   results = []
-  if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
+  if not CppLintProcessor().RunOnFiles(
+      input_api.AffectedFiles(include_deletes=False)):
     results.append(output_api.PresubmitError("C++ lint check failed"))
-  if not SourceProcessor().Run(input_api.PresubmitLocalPath()):
+  if not SourceProcessor().RunOnFiles(
+      input_api.AffectedFiles(include_deletes=False)):
     results.append(output_api.PresubmitError(
         "Copyright header, trailing whitespaces and two empty lines " \
         "between declarations check failed"))
-  if not CheckStatusFiles(input_api.PresubmitLocalPath()):
+  if not StatusFilesProcessor().RunOnFiles(
+      input_api.AffectedFiles(include_deletes=False)):
     results.append(output_api.PresubmitError("Status file check failed"))
-  results.extend(CheckAuthorizedAuthor(input_api, output_api))
+  results.extend(input_api.canned_checks.CheckAuthorizedAuthor(
+      input_api, output_api))
   return results


@@ -24,3 +24,9 @@ linux_use_bundled_binutils_override = true
 asan_suppressions_file = "//build/sanitizers/asan_suppressions.cc"
 lsan_suppressions_file = "//build/sanitizers/lsan_suppressions.cc"
 tsan_suppressions_file = "//build/sanitizers/tsan_suppressions.cc"
+
+# Skip assertions about 4GiB file size limit.
+ignore_elf32_limitations = true
+
+# Use the system install of Xcode for tools like ibtool, libtool, etc.
+use_system_xcode = true


@@ -2,14 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

-import("//build/config/features.gni")
-import("//build/config/ui.gni")
 import("//build/config/v8_target_cpu.gni")
-import("//gni/v8.gni")
-
-if (is_android) {
-  import("//build/config/android/config.gni")
-}

 if (((v8_current_cpu == "x86" || v8_current_cpu == "x64" ||
       v8_current_cpu == "x87") && (is_linux || is_mac)) ||
@@ -24,9 +17,9 @@ v8_extra_library_files = [ "//test/cctest/test-extra.js" ]
 v8_experimental_extra_library_files =
     [ "//test/cctest/test-experimental-extra.js" ]

-declare_args() {
-  # Enable inspector. See include/v8-inspector.h.
-  v8_enable_inspector = true
-}
-
-v8_enable_inspector_override = v8_enable_inspector
+v8_enable_inspector_override = true
+
+declare_args() {
+  # Use static libraries instead of source_sets.
+  v8_static_library = false
+}


@@ -3,7 +3,6 @@
 # found in the LICENSE file.

 import("//build/config/sanitizers/sanitizers.gni")
-import("//build_overrides/v8.gni")
 import("//third_party/icu/config.gni")
 import("v8.gni")
@@ -97,7 +96,7 @@ template("v8_isolate_run") {
   } else {
     icu_use_data_file_flag = "0"
   }
-  if (v8_enable_inspector_override) {
+  if (v8_enable_inspector) {
     enable_inspector = "1"
   } else {
     enable_inspector = "0"
@@ -181,7 +180,7 @@ template("v8_isolate_run") {
   if (is_win) {
     args += [
       "--config-variable",
-      "msvs_version=2013",
+      "msvs_version=2015",
     ]
   } else {
     args += [

26
deps/v8/gni/v8.gni vendored

@@ -4,8 +4,12 @@
 import("//build/config/sanitizers/sanitizers.gni")
 import("//build/config/v8_target_cpu.gni")
+import("//build_overrides/v8.gni")

 declare_args() {
+  # Includes files needed for correctness fuzzing.
+  v8_correctness_fuzzer = false
+
   # Indicate if valgrind was fetched as a custom deps to make it available on
   # swarming.
   v8_has_valgrind = false
@@ -30,6 +34,9 @@ declare_args() {
   # Enable ECMAScript Internationalization API. Enabling this feature will
   # add a dependency on the ICU library.
   v8_enable_i18n_support = true
+
+  # Enable inspector. See include/v8-inspector.h.
+  v8_enable_inspector = v8_enable_inspector_override
 }

 if (v8_use_external_startup_data == "") {
@@ -83,11 +90,20 @@ if (is_posix && v8_enable_backtrace) {
 # All templates should be kept in sync.
 template("v8_source_set") {
-  source_set(target_name) {
-    forward_variables_from(invoker, "*", [ "configs" ])
-    configs += invoker.configs
-    configs -= v8_remove_configs
-    configs += v8_add_configs
+  if (defined(v8_static_library) && v8_static_library) {
+    static_library(target_name) {
+      forward_variables_from(invoker, "*", [ "configs" ])
+      configs += invoker.configs
+      configs -= v8_remove_configs
+      configs += v8_add_configs
+    }
+  } else {
+    source_set(target_name) {
+      forward_variables_from(invoker, "*", [ "configs" ])
+      configs += invoker.configs
+      configs -= v8_remove_configs
+      configs += v8_add_configs
+    }
   }
 }


@@ -27,10 +27,14 @@
     }],
     ['v8_enable_inspector==1', {
       'dependencies': [
-        '../test/debugger/debugger.gyp:*',
         '../test/inspector/inspector.gyp:*',
       ],
     }],
+    ['v8_enable_inspector==1 and test_isolation_mode != "noop"', {
+      'dependencies': [
+        '../test/debugger/debugger.gyp:*',
+      ],
+    }],
     ['test_isolation_mode != "noop"', {
       'dependencies': [
         '../test/bot_default.gyp:*',


@@ -31,6 +31,7 @@ def main():
   print 'Clober to fix windows build problems.'
   print 'Clober again to fix windows build problems.'
   print 'Clobber to possibly resolve failure on win-32 bot.'
+  print 'Clobber for http://crbug.com/668958.'
   return 0


@@ -989,6 +989,8 @@
         # present in VS 2003 and earlier.
         'msvs_disabled_warnings': [4351],
         'msvs_configuration_attributes': {
+          'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
+          'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
          'CharacterSet': '1',
         },
       }],


@@ -0,0 +1,97 @@
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(machenbach): Remove this when crbug.com/669910 is resolved.
{
'conditions': [
# Copy the VS runtime DLLs into the isolate so that they
# don't have to be preinstalled on the target machine.
#
# VS2013 runtimes
['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
'variables': {
'files': [
'<(PRODUCT_DIR)/msvcp120d.dll',
'<(PRODUCT_DIR)/msvcr120d.dll',
],
},
}],
['OS=="win" and msvs_version==2013 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
'variables': {
'files': [
'<(PRODUCT_DIR)/msvcp120.dll',
'<(PRODUCT_DIR)/msvcr120.dll',
],
},
}],
# VS2015 runtimes
['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Debug" or CONFIGURATION_NAME=="Debug_x64")', {
'variables': {
'files': [
'<(PRODUCT_DIR)/msvcp140d.dll',
'<(PRODUCT_DIR)/vccorlib140d.dll',
'<(PRODUCT_DIR)/vcruntime140d.dll',
'<(PRODUCT_DIR)/ucrtbased.dll',
],
},
}],
['OS=="win" and msvs_version==2015 and component=="shared_library" and (CONFIGURATION_NAME=="Release" or CONFIGURATION_NAME=="Release_x64")', {
'variables': {
'files': [
'<(PRODUCT_DIR)/msvcp140.dll',
'<(PRODUCT_DIR)/vccorlib140.dll',
'<(PRODUCT_DIR)/vcruntime140.dll',
'<(PRODUCT_DIR)/ucrtbase.dll',
],
},
}],
['OS=="win" and msvs_version==2015 and component=="shared_library"', {
# Windows 10 Universal C Runtime binaries.
'variables': {
'files': [
'<(PRODUCT_DIR)/api-ms-win-core-console-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-datetime-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-debug-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-errorhandling-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-file-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-file-l1-2-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-file-l2-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-handle-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-heap-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-interlocked-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-libraryloader-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-localization-l1-2-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-memory-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-namedpipe-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-processenvironment-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-processthreads-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-processthreads-l1-1-1.dll',
'<(PRODUCT_DIR)/api-ms-win-core-profile-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-rtlsupport-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-string-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-synch-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-synch-l1-2-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-sysinfo-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-timezone-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-core-util-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-conio-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-convert-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-environment-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-filesystem-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-heap-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-locale-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-math-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-multibyte-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-private-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-process-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-runtime-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-stdio-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-string-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-time-l1-1-0.dll',
'<(PRODUCT_DIR)/api-ms-win-crt-utility-l1-1-0.dll',
],
},
}],
],
}


@@ -34,6 +34,17 @@ V8_PLATFORM_EXPORT v8::Platform* CreateDefaultPlatform(
 V8_PLATFORM_EXPORT bool PumpMessageLoop(v8::Platform* platform,
                                         v8::Isolate* isolate);

+/**
+ * Runs pending idle tasks for at most |idle_time_in_seconds| seconds.
+ *
+ * The caller has to make sure that this is called from the right thread.
+ * This call does not block if no task is pending. The |platform| has to be
+ * created using |CreateDefaultPlatform|.
+ */
+V8_PLATFORM_EXPORT void RunIdleTasks(v8::Platform* platform,
+                                     v8::Isolate* isolate,
+                                     double idle_time_in_seconds);
+
 /**
  * Attempts to set the tracing controller for the given platform.
  *
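
The sketch below (embedder-side code, not part of the diff) shows where the new RunIdleTasks entry point fits next to the existing PumpMessageLoop call; the PumpUntilIdle helper and the 10ms budget are illustrative assumptions.

  #include "include/libplatform/libplatform.h"
  #include "include/v8.h"

  // Drain pending foreground tasks, then hand V8 a slice of idle time for
  // work such as incremental garbage collection.
  void PumpUntilIdle(v8::Platform* platform, v8::Isolate* isolate) {
    while (v8::platform::PumpMessageLoop(platform, isolate)) {
      // Keep pumping while foreground tasks remain.
    }
    // |platform| must have been created with CreateDefaultPlatform().
    v8::platform::RunIdleTasks(platform, isolate, 0.01 /* seconds */);
  }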


@@ -16,11 +16,9 @@ namespace v8 {
 enum DebugEvent {
   Break = 1,
   Exception = 2,
-  NewFunction = 3,
-  BeforeCompile = 4,
-  AfterCompile = 5,
-  CompileError = 6,
-  AsyncTaskEvent = 7,
+  AfterCompile = 3,
+  CompileError = 4,
+  AsyncTaskEvent = 5,
 };

 class V8_EXPORT Debug {
@@ -87,7 +85,6 @@ class V8_EXPORT Debug {
     virtual ~Message() {}
   };

-
   /**
    * An event details object passed to the debug event listener.
    */
@@ -145,7 +142,7 @@ class V8_EXPORT Debug {
    *
    * \param message the debug message handler message object
    *
-   * A MessageHandler2 does not take possession of the message data,
+   * A MessageHandler does not take possession of the message data,
    * and must not rely on the data persisting after the handler returns.
    */
   typedef void (*MessageHandler)(const Message& message);
@@ -167,33 +164,37 @@ class V8_EXPORT Debug {
   static void CancelDebugBreak(Isolate* isolate);

   // Check if a debugger break is scheduled in the given isolate.
-  static bool CheckDebugBreak(Isolate* isolate);
+  V8_DEPRECATED("No longer supported",
+                static bool CheckDebugBreak(Isolate* isolate));

   // Message based interface. The message protocol is JSON.
-  static void SetMessageHandler(Isolate* isolate, MessageHandler handler);
+  V8_DEPRECATED("No longer supported",
+                static void SetMessageHandler(Isolate* isolate,
+                                              MessageHandler handler));

-  static void SendCommand(Isolate* isolate,
-                          const uint16_t* command, int length,
-                          ClientData* client_data = NULL);
+  V8_DEPRECATED("No longer supported",
+                static void SendCommand(Isolate* isolate,
+                                        const uint16_t* command, int length,
+                                        ClientData* client_data = NULL));

   /**
    * Run a JavaScript function in the debugger.
    * \param fun the function to call
    * \param data passed as second argument to the function
    * With this call the debugger is entered and the function specified is called
    * with the execution state as the first argument. This makes it possible to
    * get access to information otherwise not available during normal JavaScript
    * execution e.g. details on stack frames. Receiver of the function call will
    * be the debugger context global object, however this is a subject to change.
    * The following example shows a JavaScript function which when passed to
    * v8::Debug::Call will return the current line of JavaScript execution.
    *
    * \code
    *   function frame_source_line(exec_state) {
    *     return exec_state.frame(0).sourceLine();
    *   }
    * \endcode
    */
   // TODO(dcarney): data arg should be a MaybeLocal
   static MaybeLocal<Value> Call(Local<Context> context,
                                 v8::Local<v8::Function> fun,
@@ -202,8 +203,9 @@ class V8_EXPORT Debug {
   /**
    * Returns a mirror object for the given object.
    */
-  static MaybeLocal<Value> GetMirror(Local<Context> context,
-                                     v8::Local<v8::Value> obj);
+  V8_DEPRECATED("No longer supported",
+                static MaybeLocal<Value> GetMirror(Local<Context> context,
+                                                   v8::Local<v8::Value> obj));

   /**
    * Makes V8 process all pending debug messages.
@@ -236,7 +238,8 @@ class V8_EXPORT Debug {
    * "Evaluate" debug command behavior currently is not specified in scope
    * of this method.
    */
-  static void ProcessDebugMessages(Isolate* isolate);
+  V8_DEPRECATED("No longer supported",
+                static void ProcessDebugMessages(Isolate* isolate));

   /**
    * Debugger is running in its own context which is entered while debugger
@@ -245,13 +248,16 @@ class V8_EXPORT Debug {
    * to change. The Context exists only when the debugger is active, i.e. at
    * least one DebugEventListener or MessageHandler is set.
    */
-  static Local<Context> GetDebugContext(Isolate* isolate);
+  V8_DEPRECATED("Use v8-inspector",
+                static Local<Context> GetDebugContext(Isolate* isolate));

   /**
    * While in the debug context, this method returns the top-most non-debug
    * context, if it exists.
    */
-  static MaybeLocal<Context> GetDebuggedContext(Isolate* isolate);
+  V8_DEPRECATED(
+      "No longer supported",
+      static MaybeLocal<Context> GetDebuggedContext(Isolate* isolate));

   /**
    * Enable/disable LiveEdit functionality for the given Isolate

@@ -248,9 +248,9 @@ class V8_EXPORT V8Inspector {
   class V8_EXPORT Channel {
    public:
     virtual ~Channel() {}
-    virtual void sendProtocolResponse(int callId,
-                                      const StringView& message) = 0;
-    virtual void sendProtocolNotification(const StringView& message) = 0;
+    virtual void sendResponse(int callId,
+                              std::unique_ptr<StringBuffer> message) = 0;
+    virtual void sendNotification(std::unique_ptr<StringBuffer> message) = 0;
     virtual void flushProtocolNotifications() = 0;
   };
   virtual std::unique_ptr<V8InspectorSession> connect(
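
For embedders, the practical consequence is that a Channel implementation now receives an owned StringBuffer rather than a borrowed StringView. A minimal sketch (the class name and the Dispatch helper are hypothetical):

  #include <memory>
  #include "include/v8-inspector.h"

  class ExampleChannel : public v8_inspector::V8Inspector::Channel {
   public:
    void sendResponse(
        int callId,
        std::unique_ptr<v8_inspector::StringBuffer> message) override {
      // The channel now owns |message| and may keep it alive as needed.
      Dispatch(message->string());
    }
    void sendNotification(
        std::unique_ptr<v8_inspector::StringBuffer> message) override {
      Dispatch(message->string());
    }
    void flushProtocolNotifications() override {}

   private:
    void Dispatch(const v8_inspector::StringView& view) {
      // Forward the raw protocol bytes to the debugger front end here.
    }
  };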

33
deps/v8/include/v8-version-string.h vendored (new file)

@@ -0,0 +1,33 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_VERSION_STRING_H_
#define V8_VERSION_STRING_H_
#include "v8-version.h" // NOLINT(build/include)
// This is here rather than v8-version.h to keep that file simple and
// machine-processable.
#if V8_IS_CANDIDATE_VERSION
#define V8_CANDIDATE_STRING " (candidate)"
#else
#define V8_CANDIDATE_STRING ""
#endif
#define V8_SX(x) #x
#define V8_S(x) V8_SX(x)
#if V8_PATCH_LEVEL > 0
#define V8_VERSION_STRING \
V8_S(V8_MAJOR_VERSION) \
"." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) "." V8_S( \
V8_PATCH_LEVEL) V8_CANDIDATE_STRING
#else
#define V8_VERSION_STRING \
V8_S(V8_MAJOR_VERSION) \
"." V8_S(V8_MINOR_VERSION) "." V8_S(V8_BUILD_NUMBER) V8_CANDIDATE_STRING
#endif
#endif // V8_VERSION_STRING_H_
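
The two-step V8_SX/V8_S indirection above is load-bearing: a single-level #x
would stringify the macro name itself instead of its value. A minimal
standalone sketch (illustrative only, not part of the patch; it reuses the
version numbers from this commit):

// illustrative-version-string.cc -- editorial example, not in the patch
#include <cstdio>

#define V8_SX(x) #x
#define V8_S(x) V8_SX(x)

#define V8_MAJOR_VERSION 5
#define V8_MINOR_VERSION 7
#define V8_BUILD_NUMBER 492
#define V8_PATCH_LEVEL 69

int main() {
  // V8_S expands its argument first, so this prints "5.7.492.69";
  // V8_SX(V8_MAJOR_VERSION) alone would yield "V8_MAJOR_VERSION".
  std::printf("%s\n", V8_S(V8_MAJOR_VERSION) "." V8_S(V8_MINOR_VERSION) "." V8_S(
                          V8_BUILD_NUMBER) "." V8_S(V8_PATCH_LEVEL));
  return 0;
}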
deps/v8/include/v8-version.h vendored
@ -9,9 +9,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define V8_MAJOR_VERSION 5
-#define V8_MINOR_VERSION 6
-#define V8_BUILD_NUMBER 326
-#define V8_PATCH_LEVEL 57
+#define V8_MINOR_VERSION 7
+#define V8_BUILD_NUMBER 492
+#define V8_PATCH_LEVEL 69

 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
deps/v8/include/v8.h vendored (diff suppressed: too large)
deps/v8/infra/config/cq.cfg vendored
@ -4,7 +4,6 @@
 version: 1
 cq_name: "v8"
 cq_status_url: "https://chromium-cq-status.appspot.com"
-hide_ref_in_committed_msg: true
 commit_burst_delay: 60
 max_commit_burst: 1
 target_ref: "refs/pending/heads/master"
deps/v8/infra/mb/mb_config.pyl vendored
@ -66,6 +66,7 @@
   'V8 Linux64 TSAN': 'gn_release_x64_tsan',
   'V8 Linux - arm64 - sim - MSAN': 'gn_release_simulate_arm64_msan',
   # Clusterfuzz.
+  'V8 Linux64 - release builder': 'gn_release_x64_correctness_fuzzer',
   'V8 Linux64 ASAN no inline - release builder':
       'gn_release_x64_asan_symbolized_edge_verify_heap',
   'V8 Linux64 ASAN - debug builder': 'gn_debug_x64_asan_edge',
@ -116,8 +117,7 @@
   'V8 Linux - s390 - sim': 'gyp_release_simulate_s390',
   'V8 Linux - s390x - sim': 'gyp_release_simulate_s390x',
   # X87.
-  'V8 Linux - x87 - nosnap - debug builder':
-      'gyp_debug_simulate_x87_no_snap',
+  'V8 Linux - x87 - nosnap - debug builder': 'gyp_debug_simulate_x87',
 },
 'client.v8.branches': {
   'V8 Linux - beta branch': 'gn_release_x86',
@ -286,6 +286,8 @@
       'v8_verify_heap'],
   'gn_release_x64_clang': [
       'gn', 'release_bot', 'x64', 'clang', 'swarming'],
+  'gn_release_x64_correctness_fuzzer' : [
+      'gn', 'release_bot', 'x64', 'v8_correctness_fuzzer'],
   'gn_release_x64_internal': [
       'gn', 'release_bot', 'x64', 'swarming', 'v8_snapshot_internal'],
   'gn_release_x64_minimal_symbols': [
@ -359,9 +361,8 @@
       'gn', 'release_trybot', 'x86', 'swarming'],

   # Gyp debug configs for simulators.
-  'gyp_debug_simulate_x87_no_snap': [
-      'gyp', 'debug_bot_static', 'simulate_x87', 'swarming',
-      'v8_snapshot_none'],
+  'gyp_debug_simulate_x87': [
+      'gyp', 'debug_bot_static', 'simulate_x87', 'swarming'],

   # Gyp debug configs for x86.
   'gyp_debug_x86': [
@ -626,6 +627,10 @@
     'gyp_defines': 'v8_enable_i18n_support=0 icu_use_data_file_flag=0',
   },

+  'v8_correctness_fuzzer': {
+    'gn_args': 'v8_correctness_fuzzer=true',
+  },
+
   'v8_disable_inspector': {
     'gn_args': 'v8_enable_inspector=false',
     'gyp_defines': 'v8_enable_inspector=0 ',
deps/v8/src/DEPS vendored
@ -10,7 +10,9 @@ include_rules = [
   "+src/heap/heap-inl.h",
   "-src/inspector",
   "-src/interpreter",
+  "+src/interpreter/bytecode-array-accessor.h",
   "+src/interpreter/bytecode-array-iterator.h",
+  "+src/interpreter/bytecode-array-random-iterator.h",
   "+src/interpreter/bytecode-decoder.h",
   "+src/interpreter/bytecode-flags.h",
   "+src/interpreter/bytecode-register.h",
deps/v8/src/accessors.cc vendored
@ -167,16 +167,38 @@ void Accessors::ArrayLengthSetter(
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);

+  DCHECK(Utils::OpenHandle(*name)->SameValue(isolate->heap()->length_string()));
+
   Handle<JSReceiver> object = Utils::OpenHandle(*info.Holder());
   Handle<JSArray> array = Handle<JSArray>::cast(object);
   Handle<Object> length_obj = Utils::OpenHandle(*val);

+  bool was_readonly = JSArray::HasReadOnlyLength(array);
+
   uint32_t length = 0;
   if (!JSArray::AnythingToArrayLength(isolate, length_obj, &length)) {
     isolate->OptionalRescheduleException(false);
     return;
   }

+  if (!was_readonly && V8_UNLIKELY(JSArray::HasReadOnlyLength(array)) &&
+      length != array->length()->Number()) {
+    // AnythingToArrayLength() may have called setter re-entrantly and modified
+    // its property descriptor. Don't perform this check if "length" was
+    // previously readonly, as this may have been called during
+    // DefineOwnPropertyIgnoreAttributes().
+    if (info.ShouldThrowOnError()) {
+      Factory* factory = isolate->factory();
+      isolate->Throw(*factory->NewTypeError(
+          MessageTemplate::kStrictReadOnlyProperty, Utils::OpenHandle(*name),
+          i::Object::TypeOf(isolate, object), object));
+      isolate->OptionalRescheduleException(false);
+    } else {
+      info.GetReturnValue().Set(false);
+    }
+    return;
+  }
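// (Editorial note, not in the original patch: AnythingToArrayLength() can run
// arbitrary user JS -- e.g. a length value whose valueOf() redefines the
// array's "length" as non-writable -- which is why the read-only flag is
// sampled before the conversion and re-checked here afterwards.)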
   JSArray::SetLength(array, length);

   uint32_t actual_new_len = 0;
@ -517,34 +539,6 @@ Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
 }

-//
-// Accessors::ScriptIsEmbedderDebugScript
-//
-
-void Accessors::ScriptIsEmbedderDebugScriptGetter(
-    v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
-  DisallowHeapAllocation no_allocation;
-  HandleScope scope(isolate);
-  Object* object = *Utils::OpenHandle(*info.Holder());
-  bool is_embedder_debug_script = Script::cast(JSValue::cast(object)->value())
-                                      ->origin_options()
-                                      .IsEmbedderDebugScript();
-  Object* res = *isolate->factory()->ToBoolean(is_embedder_debug_script);
-  info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(res, isolate)));
-}
-
-Handle<AccessorInfo> Accessors::ScriptIsEmbedderDebugScriptInfo(
-    Isolate* isolate, PropertyAttributes attributes) {
-  Handle<String> name(isolate->factory()->InternalizeOneByteString(
-      STATIC_CHAR_VECTOR("is_debugger_script")));
-  return MakeAccessor(isolate, name, &ScriptIsEmbedderDebugScriptGetter,
-                      nullptr, attributes);
-}
-
 //
 // Accessors::ScriptGetContextData
 //
@ -829,8 +823,8 @@ static Handle<Object> ArgumentsForInlinedFunction(
   Handle<FixedArray> array = factory->NewFixedArray(argument_count);
   bool should_deoptimize = false;
   for (int i = 0; i < argument_count; ++i) {
-    // If we materialize any object, we should deopt because we might alias
-    // an object that was eliminated by escape analysis.
+    // If we materialize any object, we should deoptimize the frame because we
+    // might alias an object that was eliminated by escape analysis.
     should_deoptimize = should_deoptimize || iter->IsMaterializedObject();
     Handle<Object> value = iter->GetValue();
     array->set(i, *value);
@ -839,7 +833,7 @@ static Handle<Object> ArgumentsForInlinedFunction(
   arguments->set_elements(*array);

   if (should_deoptimize) {
-    translated_values.StoreMaterializedValuesAndDeopt();
+    translated_values.StoreMaterializedValuesAndDeopt(frame);
   }

   // Return the freshly allocated arguments object.
@ -850,10 +844,10 @@ static Handle<Object> ArgumentsForInlinedFunction(
 static int FindFunctionInFrame(JavaScriptFrame* frame,
                                Handle<JSFunction> function) {
   DisallowHeapAllocation no_allocation;
-  List<JSFunction*> functions(2);
-  frame->GetFunctions(&functions);
-  for (int i = functions.length() - 1; i >= 0; i--) {
-    if (functions[i] == *function) return i;
+  List<FrameSummary> frames(2);
+  frame->Summarize(&frames);
+  for (int i = frames.length() - 1; i >= 0; i--) {
+    if (*frames[i].AsJavaScript().function() == *function) return i;
   }
   return -1;
 }
@ -957,19 +951,16 @@ static inline bool AllowAccessToFunction(Context* current_context,
 class FrameFunctionIterator {
  public:
   FrameFunctionIterator(Isolate* isolate, const DisallowHeapAllocation& promise)
-      : isolate_(isolate),
-        frame_iterator_(isolate),
-        functions_(2),
-        index_(0) {
-    GetFunctions();
+      : isolate_(isolate), frame_iterator_(isolate), frames_(2), index_(0) {
+    GetFrames();
   }
   JSFunction* next() {
     while (true) {
-      if (functions_.length() == 0) return NULL;
-      JSFunction* next_function = functions_[index_];
+      if (frames_.length() == 0) return NULL;
+      JSFunction* next_function = *frames_[index_].AsJavaScript().function();
       index_--;
       if (index_ < 0) {
-        GetFunctions();
+        GetFrames();
       }
       // Skip functions from other origins.
       if (!AllowAccessToFunction(isolate_->context(), next_function)) continue;
@ -990,18 +981,18 @@ class FrameFunctionIterator {
   }

  private:
-  void GetFunctions() {
-    functions_.Rewind(0);
+  void GetFrames() {
+    frames_.Rewind(0);
     if (frame_iterator_.done()) return;
     JavaScriptFrame* frame = frame_iterator_.frame();
-    frame->GetFunctions(&functions_);
-    DCHECK(functions_.length() > 0);
+    frame->Summarize(&frames_);
+    DCHECK(frames_.length() > 0);
     frame_iterator_.Advance();
-    index_ = functions_.length() - 1;
+    index_ = frames_.length() - 1;
   }
   Isolate* isolate_;
   JavaScriptFrameIterator frame_iterator_;
-  List<JSFunction*> functions_;
+  List<FrameSummary> frames_;
   int index_;
 };
@ -1025,10 +1016,11 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
     if (caller == NULL) return MaybeHandle<JSFunction>();
   } while (caller->shared()->is_toplevel());

-  // If caller is a built-in function and caller's caller is also built-in,
+  // If caller is not user code and caller's caller is also not user code,
   // use that instead.
   JSFunction* potential_caller = caller;
-  while (potential_caller != NULL && potential_caller->shared()->IsBuiltin()) {
+  while (potential_caller != NULL &&
+         !potential_caller->shared()->IsUserJavaScript()) {
     caller = potential_caller;
     potential_caller = it.next();
   }
@ -1210,7 +1202,8 @@ void Accessors::ErrorStackGetter(
   // If stack is still an accessor (this could have changed in the meantime
   // since FormatStackTrace can execute arbitrary JS), replace it with a data
   // property.
-  Handle<Object> receiver = Utils::OpenHandle(*info.This());
+  Handle<Object> receiver =
+      Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
   Handle<Name> name = Utils::OpenHandle(*key);
   if (IsAccessor(receiver, name, holder)) {
     result = ReplaceAccessorWithDataProperty(isolate, receiver, holder, name,
@ -1236,8 +1229,8 @@ void Accessors::ErrorStackSetter(
     const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<JSObject> obj =
-      Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
+  Handle<JSObject> obj = Handle<JSObject>::cast(
+      Utils::OpenHandle(*v8::Local<v8::Value>(info.This())));

   // Clear internal properties to avoid memory leaks.
   Handle<Symbol> stack_trace_symbol = isolate->factory()->stack_trace_symbol();
deps/v8/src/accessors.h vendored
@ -43,7 +43,6 @@ class AccessorInfo;
   V(ScriptType)                   \
   V(ScriptSourceUrl)              \
   V(ScriptSourceMappingUrl)       \
-  V(ScriptIsEmbedderDebugScript)  \
   V(StringLength)

 #define ACCESSOR_SETTER_LIST(V) \
deps/v8/src/allocation.cc vendored
@ -32,23 +32,6 @@ void Malloced::Delete(void* p) {
 }

-#ifdef DEBUG
-
-static void* invalid = static_cast<void*>(NULL);
-
-void* Embedded::operator new(size_t size) {
-  UNREACHABLE();
-  return invalid;
-}
-
-void Embedded::operator delete(void* p) {
-  UNREACHABLE();
-}
-
-#endif
-
 char* StrDup(const char* str) {
   int length = StrLength(str);
   char* result = NewArray<char>(length + 1);
deps/v8/src/allocation.h vendored
@ -26,24 +26,9 @@ class V8_EXPORT_PRIVATE Malloced {
   static void Delete(void* p);
 };

-// A macro is used for defining the base class used for embedded instances.
-// The reason is some compilers allocate a minimum of one word for the
-// superclass. The macro prevents the use of new & delete in debug mode.
-// In release mode we are not willing to pay this overhead.
-#ifdef DEBUG
-
-// Superclass for classes with instances allocated inside stack
-// activations or inside other objects.
-class Embedded {
- public:
-  void* operator new(size_t size);
-  void operator delete(void* p);
-};
-
-#define BASE_EMBEDDED : public NON_EXPORTED_BASE(Embedded)
-#else
+// DEPRECATED
+// TODO(leszeks): Delete this during a quiet period
 #define BASE_EMBEDDED
-#endif

 // Superclass for classes only using static method functions.
deps/v8/src/api-arguments-inl.h vendored
@ -10,6 +10,14 @@
 namespace v8 {
 namespace internal {

+#define SIDE_EFFECT_CHECK(ISOLATE, F, RETURN_TYPE)            \
+  do {                                                        \
+    if (ISOLATE->needs_side_effect_check() &&                 \
+        !PerformSideEffectCheck(ISOLATE, FUNCTION_ADDR(F))) { \
+      return Handle<RETURN_TYPE>();                           \
+    }                                                         \
+  } while (false)
+
 #define FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(F)                  \
   F(AccessorNameGetterCallback, "get", v8::Value, Object)          \
   F(GenericNamedPropertyQueryCallback, "has", v8::Integer, Object) \
@ -19,6 +27,7 @@ namespace internal {
   Handle<InternalReturn> PropertyCallbackArguments::Call(Function f,          \
                                                          Handle<Name> name) { \
     Isolate* isolate = this->isolate();                                       \
+    SIDE_EFFECT_CHECK(isolate, f, InternalReturn);                            \
     RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function);        \
     VMState<EXTERNAL> state(isolate);                                         \
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));              \
@ -43,6 +52,7 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_1_NAME(WRITE_CALL_1_NAME)
   Handle<InternalReturn> PropertyCallbackArguments::Call(Function f,       \
                                                          uint32_t index) { \
     Isolate* isolate = this->isolate();                                    \
+    SIDE_EFFECT_CHECK(isolate, f, InternalReturn);                         \
     RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::Function);     \
     VMState<EXTERNAL> state(isolate);                                      \
     ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));           \
@ -62,6 +72,7 @@ Handle<Object> PropertyCallbackArguments::Call(
     GenericNamedPropertySetterCallback f, Handle<Name> name,
     Handle<Object> value) {
   Isolate* isolate = this->isolate();
+  SIDE_EFFECT_CHECK(isolate, f, Object);
   RuntimeCallTimerScope timer(
       isolate, &RuntimeCallStats::GenericNamedPropertySetterCallback);
   VMState<EXTERNAL> state(isolate);
@ -77,6 +88,7 @@ Handle<Object> PropertyCallbackArguments::Call(
     GenericNamedPropertyDefinerCallback f, Handle<Name> name,
     const v8::PropertyDescriptor& desc) {
   Isolate* isolate = this->isolate();
+  SIDE_EFFECT_CHECK(isolate, f, Object);
   RuntimeCallTimerScope timer(
       isolate, &RuntimeCallStats::GenericNamedPropertyDefinerCallback);
   VMState<EXTERNAL> state(isolate);
@ -92,6 +104,7 @@ Handle<Object> PropertyCallbackArguments::Call(IndexedPropertySetterCallback f,
                                                uint32_t index,
                                                Handle<Object> value) {
   Isolate* isolate = this->isolate();
+  SIDE_EFFECT_CHECK(isolate, f, Object);
   RuntimeCallTimerScope timer(isolate,
                               &RuntimeCallStats::IndexedPropertySetterCallback);
   VMState<EXTERNAL> state(isolate);
@ -107,6 +120,7 @@ Handle<Object> PropertyCallbackArguments::Call(
     IndexedPropertyDefinerCallback f, uint32_t index,
     const v8::PropertyDescriptor& desc) {
   Isolate* isolate = this->isolate();
+  SIDE_EFFECT_CHECK(isolate, f, Object);
   RuntimeCallTimerScope timer(
       isolate, &RuntimeCallStats::IndexedPropertyDefinerCallback);
   VMState<EXTERNAL> state(isolate);
@ -121,6 +135,10 @@ Handle<Object> PropertyCallbackArguments::Call(
 void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
                                      Handle<Name> name, Handle<Object> value) {
   Isolate* isolate = this->isolate();
+  if (isolate->needs_side_effect_check() &&
+      !PerformSideEffectCheck(isolate, FUNCTION_ADDR(f))) {
+    return;
+  }
   RuntimeCallTimerScope timer(isolate,
                               &RuntimeCallStats::AccessorNameSetterCallback);
   VMState<EXTERNAL> state(isolate);
@ -131,5 +149,7 @@ void PropertyCallbackArguments::Call(AccessorNameSetterCallback f,
   f(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info);
 }

+#undef SIDE_EFFECT_CHECK
+
 }  // namespace internal
 }  // namespace v8
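
The SIDE_EFFECT_CHECK guard added above follows a simple pattern: before the
embedder's callback runs, the invocation is aborted whenever the isolate is in
a "no side effects allowed" evaluation. A self-contained sketch of that
pattern (hypothetical, simplified names; the real check lives in
Debug::PerformSideEffectCheckForCallback):

// guard-pattern-sketch.cc -- editorial example, not in the patch
#include <cstdio>

// Stand-in for the V8-internal isolate state.
struct Isolate {
  bool needs_side_effect_check = false;
};

using Callback = void (*)();

// The real implementation whitelists known side-effect-free callbacks;
// this stub conservatively rejects every callback.
static bool PerformSideEffectCheck(Isolate* isolate, Callback f) {
  (void)isolate;
  (void)f;
  return false;
}

#define SIDE_EFFECT_CHECK(ISOLATE, F)            \
  do {                                           \
    if ((ISOLATE)->needs_side_effect_check &&    \
        !PerformSideEffectCheck((ISOLATE), (F))) \
      return false;                              \
  } while (false)

static void EmbedderCallback() { std::puts("callback ran"); }

static bool InvokeCallback(Isolate* isolate, Callback f) {
  SIDE_EFFECT_CHECK(isolate, f);  // may abort the invocation early
  f();
  return true;
}

int main() {
  Isolate isolate;
  InvokeCallback(&isolate, EmbedderCallback);  // prints "callback ran"
  isolate.needs_side_effect_check = true;
  InvokeCallback(&isolate, EmbedderCallback);  // rejected, prints nothing
  return 0;
}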
deps/v8/src/api-arguments.cc vendored
@ -4,6 +4,8 @@

 #include "src/api-arguments.h"

+#include "src/debug/debug.h"
+#include "src/objects-inl.h"
 #include "src/tracing/trace-event.h"
 #include "src/vm-state-inl.h"

@ -12,6 +14,10 @@ namespace internal {

 Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
   Isolate* isolate = this->isolate();
+  if (isolate->needs_side_effect_check() &&
+      !isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
+    return Handle<Object>();
+  }
   RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::FunctionCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
@ -23,6 +29,10 @@ Handle<Object> FunctionCallbackArguments::Call(FunctionCallback f) {
 Handle<JSObject> PropertyCallbackArguments::Call(
     IndexedPropertyEnumeratorCallback f) {
   Isolate* isolate = this->isolate();
+  if (isolate->needs_side_effect_check() &&
+      !isolate->debug()->PerformSideEffectCheckForCallback(FUNCTION_ADDR(f))) {
+    return Handle<JSObject>();
+  }
   RuntimeCallTimerScope timer(isolate, &RuntimeCallStats::PropertyCallback);
   VMState<EXTERNAL> state(isolate);
   ExternalCallbackScope call_scope(isolate, FUNCTION_ADDR(f));
@ -31,5 +41,10 @@ Handle<JSObject> PropertyCallbackArguments::Call(
   return GetReturnValue<JSObject>(isolate);
 }

+bool PropertyCallbackArguments::PerformSideEffectCheck(Isolate* isolate,
+                                                       Address function) {
+  return isolate->debug()->PerformSideEffectCheckForCallback(function);
+}
+
 }  // namespace internal
 }  // namespace v8
deps/v8/src/api-arguments.h vendored
@ -136,6 +136,8 @@ class PropertyCallbackArguments
   inline JSObject* holder() {
     return JSObject::cast(this->begin()[T::kHolderIndex]);
   }
+
+  bool PerformSideEffectCheck(Isolate* isolate, Address function);
 };

 class FunctionCallbackArguments
deps/v8/src/api-experimental.cc vendored
@ -8,10 +8,11 @@

 #include "src/api-experimental.h"

-#include "include/v8.h"
 #include "include/v8-experimental.h"
+#include "include/v8.h"
 #include "src/api.h"
 #include "src/fast-accessor-assembler.h"
+#include "src/objects-inl.h"

 namespace {
deps/v8/src/api-natives.cc vendored
@ -395,6 +395,28 @@ MaybeHandle<JSObject> InstantiateObject(Isolate* isolate,
   return result;
 }

+namespace {
+MaybeHandle<Object> GetInstancePrototype(Isolate* isolate,
+                                         Object* function_template) {
+  // Enter a new scope. Recursion could otherwise create a lot of handles.
+  HandleScope scope(isolate);
+  Handle<JSFunction> parent_instance;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, parent_instance,
+      InstantiateFunction(
+          isolate,
+          handle(FunctionTemplateInfo::cast(function_template), isolate)),
+      JSFunction);
+  Handle<Object> instance_prototype;
+  // TODO(cbruni): decide what to do here.
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate, instance_prototype,
+      JSObject::GetProperty(parent_instance,
+                            isolate->factory()->prototype_string()),
+      JSFunction);
+  return scope.CloseAndEscape(instance_prototype);
+}
+}  // namespace
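// (Editorial note, not in the original patch: the scope-then-CloseAndEscape
// idiom above is the internal counterpart of the public
// v8::EscapableHandleScope::Escape() pattern -- the returned handle is copied
// out into the enclosing scope before the inner one is torn down.)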
 MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
                                             Handle<FunctionTemplateInfo> data,
@ -406,11 +428,18 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
       return Handle<JSFunction>::cast(result);
     }
   }
-  Handle<JSObject> prototype;
+  Handle<Object> prototype;
   if (!data->remove_prototype()) {
     Object* prototype_templ = data->prototype_template();
     if (prototype_templ->IsUndefined(isolate)) {
-      prototype = isolate->factory()->NewJSObject(isolate->object_function());
+      Object* protoype_provider_templ = data->prototype_provider_template();
+      if (protoype_provider_templ->IsUndefined(isolate)) {
+        prototype = isolate->factory()->NewJSObject(isolate->object_function());
+      } else {
+        ASSIGN_RETURN_ON_EXCEPTION(
+            isolate, prototype,
+            GetInstancePrototype(isolate, protoype_provider_templ), JSFunction);
+      }
     } else {
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, prototype,
@ -422,22 +451,12 @@ MaybeHandle<JSFunction> InstantiateFunction(Isolate* isolate,
     }
     Object* parent = data->parent_template();
     if (!parent->IsUndefined(isolate)) {
-      // Enter a new scope. Recursion could otherwise create a lot of handles.
-      HandleScope scope(isolate);
-      Handle<JSFunction> parent_instance;
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, parent_instance,
-          InstantiateFunction(
-              isolate, handle(FunctionTemplateInfo::cast(parent), isolate)),
-          JSFunction);
-      // TODO(dcarney): decide what to do here.
       Handle<Object> parent_prototype;
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, parent_prototype,
-          JSObject::GetProperty(parent_instance,
-                                isolate->factory()->prototype_string()),
-          JSFunction);
-      JSObject::ForceSetPrototype(prototype, parent_prototype);
+      ASSIGN_RETURN_ON_EXCEPTION(isolate, parent_prototype,
+                                 GetInstancePrototype(isolate, parent),
+                                 JSFunction);
+      JSObject::ForceSetPrototype(Handle<JSObject>::cast(prototype),
+                                  parent_prototype);
     }
   }
   Handle<JSFunction> function = ApiNatives::CreateApiFunction(
@ -531,7 +550,7 @@ MaybeHandle<JSObject> ApiNatives::InstantiateRemoteObject(
 void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
                                  Handle<Name> name, Handle<Object> value,
                                  PropertyAttributes attributes) {
-  PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+  PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
   auto details_handle = handle(details.AsSmi(), isolate);
   Handle<Object> data[] = {name, details_handle, value};
   AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@ -543,7 +562,7 @@ void ApiNatives::AddDataProperty(Isolate* isolate, Handle<TemplateInfo> info,
                                  PropertyAttributes attributes) {
   auto value = handle(Smi::FromInt(intrinsic), isolate);
   auto intrinsic_marker = isolate->factory()->true_value();
-  PropertyDetails details(attributes, DATA, 0, PropertyCellType::kNoCell);
+  PropertyDetails details(kData, attributes, 0, PropertyCellType::kNoCell);
   auto details_handle = handle(details.AsSmi(), isolate);
   Handle<Object> data[] = {name, intrinsic_marker, details_handle, value};
   AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@ -556,7 +575,7 @@ void ApiNatives::AddAccessorProperty(Isolate* isolate,
                                      Handle<FunctionTemplateInfo> getter,
                                      Handle<FunctionTemplateInfo> setter,
                                      PropertyAttributes attributes) {
-  PropertyDetails details(attributes, ACCESSOR, 0, PropertyCellType::kNoCell);
+  PropertyDetails details(kAccessor, attributes, 0, PropertyCellType::kNoCell);
   auto details_handle = handle(details.AsSmi(), isolate);
   Handle<Object> data[] = {name, details_handle, getter, setter};
   AddPropertyToPropertyList(isolate, info, arraysize(data), data);
@ -606,7 +625,7 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(
   if (prototype->IsTheHole(isolate)) {
     prototype = isolate->factory()->NewFunctionPrototype(result);
-  } else {
+  } else if (obj->prototype_provider_template()->IsUndefined(isolate)) {
     JSObject::AddProperty(Handle<JSObject>::cast(prototype),
                           isolate->factory()->constructor_string(), result,
                           DONT_ENUM);
@ -656,6 +675,12 @@ Handle<JSFunction> ApiNatives::CreateApiFunction(

   // Mark as undetectable if needed.
   if (obj->undetectable()) {
+    // We only allow callable undetectable receivers here, since this whole
+    // undetectable business is only to support document.all, which is both
+    // undetectable and callable. If we ever see the need to have an object
+    // that is undetectable but not callable, we need to update the types.h
+    // to allow encoding this.
+    CHECK(!obj->instance_call_handler()->IsUndefined(isolate));
     map->set_is_undetectable();
   }
deps/v8/src/api.cc vendored (diff suppressed: too large)
deps/v8/src/api.h vendored
@ -11,7 +11,6 @@
 #include "src/factory.h"
 #include "src/isolate.h"
 #include "src/list.h"
-#include "src/objects-inl.h"

 namespace v8 {

@ -110,7 +109,7 @@ class RegisteredExtension {
   V(StackFrame, JSObject)            \
   V(Proxy, JSProxy)                  \
   V(NativeWeakMap, JSWeakMap)        \
-  V(DebugInterface::Script, Script)
+  V(debug::Script, Script)

 class Utils {
  public:
deps/v8/src/arguments.h vendored
@ -41,7 +41,8 @@ class Arguments BASE_EMBEDDED {
                                index * kPointerSize));
   }

-  template <class S> Handle<S> at(int index) {
+  template <class S = Object>
+  Handle<S> at(int index) {
     Object** value = &((*this)[index]);
     // This cast checks that the object we're accessing does indeed have the
     // expected type.
deps/v8/src/arm/assembler-arm-inl.h vendored
@ -48,7 +48,7 @@ namespace internal {

 bool CpuFeatures::SupportsCrankshaft() { return true; }

-bool CpuFeatures::SupportsSimd128() { return false; }
+bool CpuFeatures::SupportsSimd128() { return true; }

 int DoubleRegister::NumRegisters() {
   return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
deps/v8/src/arm/assembler-arm.cc vendored
@ -351,13 +351,18 @@ Address RelocInfo::wasm_global_reference() {
   return Assembler::target_address_at(pc_, host_);
 }

+uint32_t RelocInfo::wasm_function_table_size_reference() {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
+}
+
 void RelocInfo::unchecked_update_wasm_memory_reference(
     Address address, ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
 }

-void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
-                                                  ICacheFlushMode flush_mode) {
+void RelocInfo::unchecked_update_wasm_size(uint32_t size,
+                                           ICacheFlushMode flush_mode) {
   Assembler::set_target_address_at(isolate_, pc_, host_,
                                    reinterpret_cast<Address>(size), flush_mode);
 }
@ -483,30 +488,6 @@ void NeonMemOperand::SetAlignment(int align) {
   }
 }

-NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
-  base_ = base;
-  switch (registers_count) {
-    case 1:
-      type_ = nlt_1;
-      break;
-    case 2:
-      type_ = nlt_2;
-      break;
-    case 3:
-      type_ = nlt_3;
-      break;
-    case 4:
-      type_ = nlt_4;
-      break;
-    default:
-      UNREACHABLE();
-      type_ = nlt_1;
-      break;
-  }
-}
-
 // -----------------------------------------------------------------------------
 // Specific instructions, constants, and masks.
@ -2873,7 +2854,6 @@ void Assembler::vmov(const DwVfpRegister dst,
        vm);
 }

-
 void Assembler::vmov(const DwVfpRegister dst,
                      const VmovIndex index,
                      const Register src,
@ -2969,7 +2949,6 @@ void Assembler::vmov(const Register dst,
   emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
 }

-
 // Type of data to read from or write to VFP register.
 // Used as specifier in generic vcvt instruction.
 enum VFPType { S32, U32, F32, F64 };

@ -3903,28 +3882,743 @@ void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
        (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
 }
-void Assembler::vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
-  DCHECK(VfpRegisterIsAvailable(srcdst0));
-  DCHECK(VfpRegisterIsAvailable(srcdst1));
-  DCHECK(!srcdst0.is(kScratchDoubleReg));
-  DCHECK(!srcdst1.is(kScratchDoubleReg));
-
-  if (srcdst0.is(srcdst1)) return;  // Swapping aliased registers emits nothing.
-
-  if (CpuFeatures::IsSupported(NEON)) {
-    // Instruction details available in ARM DDI 0406C.b, A8.8.418.
-    // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
-    // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
-    int vd, d;
-    srcdst0.split_code(&vd, &d);
-    int vm, m;
-    srcdst1.split_code(&vm, &m);
-    emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
-  } else {
-    vmov(kScratchDoubleReg, srcdst0);
-    vmov(srcdst0, srcdst1);
-    vmov(srcdst1, kScratchDoubleReg);
-  }
-}
+static int EncodeScalar(NeonDataType dt, int index) {
+  int opc1_opc2 = 0;
+  DCHECK_LE(0, index);
+  switch (dt) {
+    case NeonS8:
+    case NeonU8:
+      DCHECK_GT(8, index);
+      opc1_opc2 = 0x8 | index;
+      break;
+    case NeonS16:
+    case NeonU16:
+      DCHECK_GT(4, index);
+      opc1_opc2 = 0x1 | (index << 1);
+      break;
+    case NeonS32:
+    case NeonU32:
+      DCHECK_GT(2, index);
+      opc1_opc2 = index << 2;
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return (opc1_opc2 >> 2) * B21 | (opc1_opc2 & 0x3) * B5;
+}
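// (Editorial worked example, not in the original patch: for NeonS16 with
// index 2, opc1_opc2 = 0x1 | (2 << 1) = 0x5, so the emitted instruction
// carries 0x5 >> 2 = 1 in the opc1 field (bit 21) and 0x5 & 0x3 = 1 in the
// opc2 field (bits 6:5), selecting the third 16-bit lane.)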
void Assembler::vmov(NeonDataType dt, DwVfpRegister dst, int index,
Register src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.940.
// vmov ARM core register to scalar.
DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int opc1_opc2 = EncodeScalar(dt, index);
emit(0xEEu * B24 | vd * B16 | src.code() * B12 | 0xB * B8 | d * B7 | B4 |
opc1_opc2);
}
void Assembler::vmov(NeonDataType dt, Register dst, DwVfpRegister src,
int index) {
// Instruction details available in ARM DDI 0406C.b, A8.8.942.
// vmov Arm scalar to core register.
DCHECK(dt == NeonS32 || dt == NeonU32 || IsEnabled(NEON));
int vn, n;
src.split_code(&vn, &n);
int opc1_opc2 = EncodeScalar(dt, index);
int u = (dt & NeonDataTypeUMask) != 0 ? 1 : 0;
emit(0xEEu * B24 | u * B23 | B20 | vn * B16 | dst.code() * B12 | 0xB * B8 |
n * B7 | B4 | opc1_opc2);
}
void Assembler::vmov(const QwNeonRegister dst, const QwNeonRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8-938.
// vmov is encoded as vorr.
vorr(dst, src, src);
}
void Assembler::vmvn(const QwNeonRegister dst, const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Instruction details available in ARM DDI 0406C.b, A8-966.
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0x1E7U * B23 | d * B22 | 3 * B20 | vd * B12 | 0x17 * B6 | m * B5 | vm);
}
void Assembler::vswp(DwVfpRegister dst, DwVfpRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.418.
// 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
// Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
}
void Assembler::vswp(QwNeonRegister dst, QwNeonRegister src) {
// Instruction details available in ARM DDI 0406C.b, A8.8.418.
// 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
// Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | B6 | m * B5 |
vm);
}
void Assembler::vdup(NeonSize size, const QwNeonRegister dst,
const Register src) {
DCHECK(IsEnabled(NEON));
// Instruction details available in ARM DDI 0406C.b, A8-886.
int B = 0, E = 0;
switch (size) {
case Neon8:
B = 1;
break;
case Neon16:
E = 1;
break;
case Neon32:
break;
default:
UNREACHABLE();
break;
}
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D * B23 | B * B22 | B21 | vd * B16 | src.code() * B12 |
0xB * B8 | d * B7 | E * B5 | B4);
}
void Assembler::vdup(const QwNeonRegister dst, const SwVfpRegister src) {
DCHECK(IsEnabled(NEON));
// Instruction details available in ARM DDI 0406C.b, A8-884.
int index = src.code() & 1;
int d_reg = src.code() / 2;
int imm4 = 4 | index << 3; // esize = 32, index in bit 3.
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
DwVfpRegister::from_code(d_reg).split_code(&vm, &m);
emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | imm4 * B16 | vd * B12 | 0x18 * B7 |
B6 | m * B5 | vm);
}
// Encode NEON vcvt.src_type.dst_type instruction.
static Instr EncodeNeonVCVT(const VFPType dst_type, const QwNeonRegister dst,
const VFPType src_type, const QwNeonRegister src) {
DCHECK(src_type != dst_type);
DCHECK(src_type == F32 || dst_type == F32);
// Instruction details available in ARM DDI 0406C.b, A8.8.868.
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int op = 0;
if (src_type == F32) {
DCHECK(dst_type == S32 || dst_type == U32);
op = dst_type == U32 ? 3 : 2;
} else {
DCHECK(src_type == S32 || src_type == U32);
op = src_type == U32 ? 1 : 0;
}
return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x3 * B9 | op * B7 |
B6 | m * B5 | vm;
}
void Assembler::vcvt_f32_s32(const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(F32, dst, S32, src));
}
void Assembler::vcvt_f32_u32(const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(F32, dst, U32, src));
}
void Assembler::vcvt_s32_f32(const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(S32, dst, F32, src));
}
void Assembler::vcvt_u32_f32(const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(VfpRegisterIsAvailable(src));
emit(EncodeNeonVCVT(U32, dst, F32, src));
}
// op is instr->Bits(11, 7).
static Instr EncodeNeonUnaryOp(int op, bool is_float, NeonSize size,
const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK_IMPLIES(is_float, size == Neon32);
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int F = is_float ? 1 : 0;
return 0x1E7U * B23 | d * B22 | 0x3 * B20 | size * B18 | B16 | vd * B12 |
F * B10 | B8 | op * B7 | B6 | m * B5 | vm;
}
void Assembler::vabs(const QwNeonRegister dst, const QwNeonRegister src) {
// Qd = vabs.f<size>(Qn, Qm) SIMD floating point absolute value.
// Instruction details available in ARM DDI 0406C.b, A8.8.824.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x6, true, Neon32, dst, src));
}
void Assembler::vabs(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
// Qd = vabs.s<size>(Qn, Qm) SIMD integer absolute value.
// Instruction details available in ARM DDI 0406C.b, A8.8.824.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x6, false, size, dst, src));
}
void Assembler::vneg(const QwNeonRegister dst, const QwNeonRegister src) {
// Qd = vneg.f<size>(Qn, Qm) SIMD floating point negate.
// Instruction details available in ARM DDI 0406C.b, A8.8.968.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x7, true, Neon32, dst, src));
}
void Assembler::vneg(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
// Qd = vneg.s<size>(Qn, Qm) SIMD integer negate.
// Instruction details available in ARM DDI 0406C.b, A8.8.968.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonUnaryOp(0x7, false, size, dst, src));
}
void Assembler::veor(DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
// Dd = veor(Dn, Dm) 64 bit integer exclusive OR.
// Instruction details available in ARM DDI 0406C.b, A8.8.888.
DCHECK(IsEnabled(NEON));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | B8 | n * B7 | m * B5 |
B4 | vm);
}
enum BinaryBitwiseOp { VAND, VBIC, VBIF, VBIT, VBSL, VEOR, VORR, VORN };
static Instr EncodeNeonBinaryBitwiseOp(BinaryBitwiseOp op,
const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2) {
int op_encoding = 0;
switch (op) {
case VBIC:
op_encoding = 0x1 * B20;
break;
case VBIF:
op_encoding = B24 | 0x3 * B20;
break;
case VBIT:
op_encoding = B24 | 0x2 * B20;
break;
case VBSL:
op_encoding = B24 | 0x1 * B20;
break;
case VEOR:
op_encoding = B24;
break;
case VORR:
op_encoding = 0x2 * B20;
break;
case VORN:
op_encoding = 0x3 * B20;
break;
case VAND:
// op_encoding is 0.
break;
default:
UNREACHABLE();
break;
}
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
return 0x1E4U * B23 | op_encoding | d * B22 | vn * B16 | vd * B12 | B8 |
n * B7 | B6 | m * B5 | B4 | vm;
}
void Assembler::vand(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
// Qd = vand(Qn, Qm) SIMD AND.
// Instruction details available in ARM DDI 0406C.b, A8.8.836.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VAND, dst, src1, src2));
}
void Assembler::vbsl(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vbsl(Qn, Qm) SIMD bitwise select.
// Instruction details available in ARM DDI 0406C.b, A8-844.
emit(EncodeNeonBinaryBitwiseOp(VBSL, dst, src1, src2));
}
void Assembler::veor(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
// Qd = veor(Qn, Qm) SIMD exclusive OR.
// Instruction details available in ARM DDI 0406C.b, A8.8.888.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VEOR, dst, src1, src2));
}
void Assembler::vorr(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
// Qd = vorr(Qn, Qm) SIMD OR.
// Instruction details available in ARM DDI 0406C.b, A8.8.976.
DCHECK(IsEnabled(NEON));
emit(EncodeNeonBinaryBitwiseOp(VORR, dst, src1, src2));
}
void Assembler::vadd(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vadd(Qn, Qm) SIMD floating point addition.
// Instruction details available in ARM DDI 0406C.b, A8-830.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 | B6 |
m * B5 | vm);
}
void Assembler::vadd(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vadd(Qn, Qm) SIMD integer addition.
// Instruction details available in ARM DDI 0406C.b, A8-828.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | vm);
}
void Assembler::vsub(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vsub(Qn, Qm) SIMD floating point subtraction.
// Instruction details available in ARM DDI 0406C.b, A8-1086.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E4U * B23 | d * B22 | B21 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 |
B6 | m * B5 | vm);
}
void Assembler::vsub(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vsub(Qn, Qm) SIMD integer subtraction.
// Instruction details available in ARM DDI 0406C.b, A8-1084.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E6U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | vm);
}
void Assembler::vmul(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmul(Qn, Qm) SIMD floating point multiply.
// Instruction details available in ARM DDI 0406C.b, A8-958.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E6U * B23 | d * B22 | vn * B16 | vd * B12 | 0xD * B8 | n * B7 | B6 |
m * B5 | B4 | vm);
}
void Assembler::vmul(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmul(Qn, Qm) SIMD integer multiply.
// Instruction details available in ARM DDI 0406C.b, A8-960.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x9 * B8 |
n * B7 | B6 | m * B5 | B4 | vm);
}
static Instr EncodeNeonMinMax(bool is_min, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int min = is_min ? 1 : 0;
return 0x1E4U * B23 | d * B22 | min * B21 | vn * B16 | vd * B12 | 0xF * B8 |
n * B7 | B6 | m * B5 | vm;
}
static Instr EncodeNeonMinMax(bool is_min, NeonDataType dt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int min = is_min ? 1 : 0;
int size = (dt & NeonDataTypeSizeMask) / 2;
int U = dt & NeonDataTypeUMask;
return 0x1E4U * B23 | U | d * B22 | size * B20 | vn * B16 | vd * B12 |
0x6 * B8 | B6 | m * B5 | min * B4 | vm;
}
void Assembler::vmin(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmin(Qn, Qm) SIMD floating point MIN.
// Instruction details available in ARM DDI 0406C.b, A8-928.
emit(EncodeNeonMinMax(true, dst, src1, src2));
}
void Assembler::vmin(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmin(Qn, Qm) SIMD integer MIN.
// Instruction details available in ARM DDI 0406C.b, A8-926.
emit(EncodeNeonMinMax(true, dt, dst, src1, src2));
}
void Assembler::vmax(QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmax(Qn, Qm) SIMD floating point MAX.
// Instruction details available in ARM DDI 0406C.b, A8-928.
emit(EncodeNeonMinMax(false, dst, src1, src2));
}
void Assembler::vmax(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src1,
QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vmax(Qn, Qm) SIMD integer MAX.
// Instruction details available in ARM DDI 0406C.b, A8-926.
emit(EncodeNeonMinMax(false, dt, dst, src1, src2));
}
static Instr EncodeNeonEstimateOp(bool is_rsqrt, QwNeonRegister dst,
QwNeonRegister src) {
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int rsqrt = is_rsqrt ? 1 : 0;
return 0x1E7U * B23 | d * B22 | 0x3B * B16 | vd * B12 | 0x5 * B8 |
rsqrt * B7 | B6 | m * B5 | vm;
}
void Assembler::vrecpe(const QwNeonRegister dst, const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vrecpe(Qm) SIMD reciprocal estimate.
// Instruction details available in ARM DDI 0406C.b, A8-1024.
emit(EncodeNeonEstimateOp(false, dst, src));
}
void Assembler::vrsqrte(const QwNeonRegister dst, const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vrsqrte(Qm) SIMD reciprocal square root estimate.
// Instruction details available in ARM DDI 0406C.b, A8-1038.
emit(EncodeNeonEstimateOp(true, dst, src));
}
static Instr EncodeNeonRefinementOp(bool is_rsqrt, QwNeonRegister dst,
QwNeonRegister src1, QwNeonRegister src2) {
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int rsqrt = is_rsqrt ? 1 : 0;
return 0x1E4U * B23 | d * B22 | rsqrt * B21 | vn * B16 | vd * B12 | 0xF * B8 |
n * B7 | B6 | m * B5 | B4 | vm;
}
void Assembler::vrecps(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vrecps(Qn, Qm) SIMD reciprocal refinement step.
// Instruction details available in ARM DDI 0406C.b, A8-1026.
emit(EncodeNeonRefinementOp(false, dst, src1, src2));
}
void Assembler::vrsqrts(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vrsqrts(Qn, Qm) SIMD reciprocal square root refinement step.
// Instruction details available in ARM DDI 0406C.b, A8-1040.
emit(EncodeNeonRefinementOp(true, dst, src1, src2));
}
void Assembler::vtst(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vtst(Qn, Qm) SIMD test integer operands.
// Instruction details available in ARM DDI 0406C.b, A8-1098.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E4U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | B4 | vm);
}
void Assembler::vceq(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vceq(Qn, Qm) SIMD floating point compare equal.
// Instruction details available in ARM DDI 0406C.b, A8-844.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
emit(0x1E4U * B23 | d * B22 | vn * B16 | vd * B12 | 0xe * B8 | n * B7 | B6 |
m * B5 | vm);
}
void Assembler::vceq(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vceq(Qn, Qm) SIMD integer compare equal.
// Instruction details available in ARM DDI 0406C.b, A8-844.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E6U * B23 | d * B22 | sz * B20 | vn * B16 | vd * B12 | 0x8 * B8 |
n * B7 | B6 | m * B5 | B4 | vm);
}
static Instr EncodeNeonCompareOp(const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2, Condition cond) {
DCHECK(cond == ge || cond == gt);
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int is_gt = (cond == gt) ? 1 : 0;
return 0x1E6U * B23 | d * B22 | is_gt * B21 | vn * B16 | vd * B12 | 0xe * B8 |
n * B7 | B6 | m * B5 | vm;
}
static Instr EncodeNeonCompareOp(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1,
const QwNeonRegister src2, Condition cond) {
DCHECK(cond == ge || cond == gt);
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
int size = (dt & NeonDataTypeSizeMask) / 2;
int U = dt & NeonDataTypeUMask;
int is_ge = (cond == ge) ? 1 : 0;
return 0x1E4U * B23 | U | d * B22 | size * B20 | vn * B16 | vd * B12 |
0x3 * B8 | n * B7 | B6 | m * B5 | is_ge * B4 | vm;
}
void Assembler::vcge(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcge(Qn, Qm) SIMD floating point compare greater or equal.
// Instruction details available in ARM DDI 0406C.b, A8-848.
emit(EncodeNeonCompareOp(dst, src1, src2, ge));
}
void Assembler::vcge(NeonDataType dt, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcge(Qn, Qm) SIMD integer compare greater or equal.
// Instruction details available in ARM DDI 0406C.b, A8-848.
emit(EncodeNeonCompareOp(dt, dst, src1, src2, ge));
}
void Assembler::vcgt(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcgt(Qn, Qm) SIMD floating point compare greater than.
// Instruction details available in ARM DDI 0406C.b, A8-852.
emit(EncodeNeonCompareOp(dst, src1, src2, gt));
}
void Assembler::vcgt(NeonDataType dt, QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2) {
DCHECK(IsEnabled(NEON));
// Qd = vcgt(Qn, Qm) SIMD integer compare greater than.
// Instruction details available in ARM DDI 0406C.b, A8-852.
emit(EncodeNeonCompareOp(dt, dst, src1, src2, gt));
}
void Assembler::vext(QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2, int bytes) {
DCHECK(IsEnabled(NEON));
// Qd = vext(Qn, Qm) SIMD byte extract.
// Instruction details available in ARM DDI 0406C.b, A8-890.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
src1.split_code(&vn, &n);
int vm, m;
src2.split_code(&vm, &m);
DCHECK_GT(16, bytes);
emit(0x1E5U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | bytes * B8 |
n * B7 | B6 | m * B5 | vm);
}
void Assembler::vzip(NeonSize size, QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
// Qd = vzip.<size>(Qn, Qm) SIMD zip (interleave).
// Instruction details available in ARM DDI 0406C.b, A8-1102.
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int sz = static_cast<int>(size);
emit(0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | 2 * B16 | vd * B12 |
0x3 * B7 | B6 | m * B5 | vm);
}
static Instr EncodeNeonVREV(NeonSize op_size, NeonSize size,
const QwNeonRegister dst,
const QwNeonRegister src) {
// Qd = vrev<op_size>.<size>(Qn, Qm) SIMD scalar reverse.
// Instruction details available in ARM DDI 0406C.b, A8-1028.
DCHECK_GT(op_size, static_cast<int>(size));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
src.split_code(&vm, &m);
int sz = static_cast<int>(size);
int op = static_cast<int>(Neon64) - static_cast<int>(op_size);
return 0x1E7U * B23 | d * B22 | 0x3 * B20 | sz * B18 | vd * B12 | op * B7 |
B6 | m * B5 | vm;
}
void Assembler::vrev16(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVREV(Neon16, size, dst, src));
}
void Assembler::vrev32(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVREV(Neon32, size, dst, src));
}
void Assembler::vrev64(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVREV(Neon64, size, dst, src));
}
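In EncodeNeonVREV above, op is Neon64 minus op_size, so with the usual NeonSize encoding (8 -> 0 through 64 -> 3) vrev64 encodes op 0, vrev32 op 1, and vrev16 op 2; the instruction reverses the <size>-wide elements inside each <op_size>-wide group. A scalar sketch of that semantics for one 16-byte Q register (helper hypothetical):

#include <algorithm>
#include <cstdint>
static void vrev_model(uint8_t bytes[16], int group_bytes, int elem_bytes) {
  for (int g = 0; g < 16; g += group_bytes) {
    // Reverse the elements of this group, keeping the bytes inside an
    // element in their original order.
    for (int i = 0, j = group_bytes - elem_bytes; i < j;
         i += elem_bytes, j -= elem_bytes) {
      for (int k = 0; k < elem_bytes; k++)
        std::swap(bytes[g + i + k], bytes[g + j + k]);
    }
  }
}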
// Encode NEON vtbl / vtbx instruction.
static Instr EncodeNeonVTB(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index, bool vtbx) {
// Dd = vtbl(table, Dm) SIMD vector permute, zero at out of range indices.
// Instruction details available in ARM DDI 0406C.b, A8-1094.
// Dd = vtbx(table, Dm) SIMD vector permute, skip out of range indices.
// Instruction details available in ARM DDI 0406C.b, A8-1094.
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
list.base().split_code(&vn, &n);
int vm, m;
index.split_code(&vm, &m);
int op = vtbx ? 1 : 0; // vtbl = 0, vtbx = 1.
return 0x1E7U * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 | 0x2 * B10 |
list.length() * B8 | n * B7 | op * B6 | m * B5 | vm;
}
void Assembler::vtbl(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVTB(dst, list, index, false));
}
void Assembler::vtbx(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index) {
DCHECK(IsEnabled(NEON));
emit(EncodeNeonVTB(dst, list, index, true));
}
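The only difference between the two permutes encoded above is the op bit: vtbl writes zero for an out-of-range index while vtbx leaves the destination lane untouched. A scalar model of one D-register lookup (helper hypothetical):

#include <cstddef>
#include <cstdint>
static void vtb_model(uint8_t dst[8], const uint8_t* table, size_t table_len,
                      const uint8_t index[8], bool is_vtbx) {
  for (int i = 0; i < 8; i++) {
    if (index[i] < table_len) {
      dst[i] = table[index[i]];
    } else if (!is_vtbx) {
      dst[i] = 0;  // vtbl zeroes out-of-range lanes; vtbx skips them.
    }
  }
}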
// Pseudo instructions.


@@ -302,6 +302,20 @@ struct QwNeonRegister {
    *m = (encoded_code & 0x10) >> 4;
    *vm = encoded_code & 0x0F;
  }
DwVfpRegister low() const {
DwVfpRegister reg;
reg.reg_code = reg_code * 2;
DCHECK(reg.is_valid());
return reg;
}
DwVfpRegister high() const {
DwVfpRegister reg;
reg.reg_code = reg_code * 2 + 1;
DCHECK(reg.is_valid());
return reg;
}
  int reg_code;
};
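The low()/high() accessors added above expose the architectural aliasing of NEON registers: Qn overlays D(2n) and D(2n+1). A tiny sketch of the code mapping, outside the assembler:

// q0 -> {d0, d1}, q7 -> {d14, d15}, q15 -> {d30, d31}.
static int low_d_code(int q_code) { return q_code * 2; }
static int high_d_code(int q_code) { return q_code * 2 + 1; }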
@@ -403,9 +417,11 @@ const QwNeonRegister q15 = { 15 };
// compilation unit that includes this header doesn't use the variables.
#define kFirstCalleeSavedDoubleReg d8
#define kLastCalleeSavedDoubleReg d15
// kDoubleRegZero and kScratchDoubleReg must pair to form kScratchQuadReg.
#define kDoubleRegZero d14
#define kScratchDoubleReg d15
// After using kScratchQuadReg, kDoubleRegZero must be reset to 0.
#define kScratchQuadReg q7
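Because q7 overlays d14 and d15, any use of kScratchQuadReg clobbers both kDoubleRegZero and kScratchDoubleReg, which is what the comments above warn about. A hypothetical call sequence illustrating the required reset (not code from this patch; the vmov-immediate form is assumed):

// __ vmov(kScratchQuadReg, src);  // temporarily clobbers d14/d15
// ...
// __ vmov(kDoubleRegZero, 0.0);   // restore the zero register afterwards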
// Coprocessor register
struct CRegister {
@@ -624,12 +640,26 @@ class NeonMemOperand BASE_EMBEDDED {
// Class NeonListOperand represents a list of NEON registers
class NeonListOperand BASE_EMBEDDED {
 public:
  explicit NeonListOperand(DoubleRegister base, int registers_count = 1);
  explicit NeonListOperand(DoubleRegister base, int register_count = 1)
      : base_(base), register_count_(register_count) {}
  explicit NeonListOperand(QwNeonRegister q_reg)
      : base_(q_reg.low()), register_count_(2) {}
  DoubleRegister base() const { return base_; }
  NeonListType type() const { return type_; }
  int register_count() { return register_count_; }
  int length() const { return register_count_ - 1; }
  NeonListType type() const {
    switch (register_count_) {
      default: UNREACHABLE();
      // Fall through.
      case 1: return nlt_1;
      case 2: return nlt_2;
      case 3: return nlt_3;
      case 4: return nlt_4;
    }
  }
 private:
  DoubleRegister base_;
  NeonListType type_;
  int register_count_;
};
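With the new Q-register constructor, a quad register can be passed directly where a register list is expected: NeonListOperand(q0) has base d0, register_count() 2, type() nlt_2, and length() 1, the value the vtbl/vtbx encodings above place in the instruction's len field. A hedged usage sketch:

NeonListOperand table(q0);        // list spanning d0 and d1
// assembler.vtbl(d2, table, d3); // d2 = lookup of the indices held in d3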
@@ -1133,6 +1163,8 @@ class Assembler : public AssemblerBase {
  void vmov(const DwVfpRegister dst,
            const DwVfpRegister src,
            const Condition cond = al);
// TODO(bbudge) Replace uses of these with the more general core register to
// scalar register vmov's.
  void vmov(const DwVfpRegister dst,
            const VmovIndex index,
            const Register src,
@@ -1313,8 +1345,86 @@ class Assembler : public AssemblerBase {
            const NeonMemOperand& dst);
  void vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src);
  // Currently, vswp supports only D0 to D31.
  void vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
  // Only unconditional core <-> scalar moves are currently supported.
  void vmov(NeonDataType dt, DwVfpRegister dst, int index, Register src);
void vmov(NeonDataType dt, Register dst, DwVfpRegister src, int index);
void vmov(const QwNeonRegister dst, const QwNeonRegister src);
void vmvn(const QwNeonRegister dst, const QwNeonRegister src);
void vswp(DwVfpRegister dst, DwVfpRegister src);
void vswp(QwNeonRegister dst, QwNeonRegister src);
// vdup conditional execution isn't supported.
void vdup(NeonSize size, const QwNeonRegister dst, const Register src);
void vdup(const QwNeonRegister dst, const SwVfpRegister src);
void vcvt_f32_s32(const QwNeonRegister dst, const QwNeonRegister src);
void vcvt_f32_u32(const QwNeonRegister dst, const QwNeonRegister src);
void vcvt_s32_f32(const QwNeonRegister dst, const QwNeonRegister src);
void vcvt_u32_f32(const QwNeonRegister dst, const QwNeonRegister src);
void vabs(const QwNeonRegister dst, const QwNeonRegister src);
void vabs(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
void vneg(const QwNeonRegister dst, const QwNeonRegister src);
void vneg(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
void veor(DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2);
void vand(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vbsl(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void veor(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vorr(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2);
void vadd(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vadd(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vsub(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vsub(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmul(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmul(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmin(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmin(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
void vmax(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vmax(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
// vrecpe and vrsqrte only support floating point lanes.
void vrecpe(const QwNeonRegister dst, const QwNeonRegister src);
void vrsqrte(const QwNeonRegister dst, const QwNeonRegister src);
void vrecps(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vrsqrts(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vtst(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vceq(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vceq(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vcge(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vcge(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
void vcgt(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2);
void vcgt(NeonDataType dt, const QwNeonRegister dst,
const QwNeonRegister src1, const QwNeonRegister src2);
void vext(const QwNeonRegister dst, const QwNeonRegister src1,
const QwNeonRegister src2, int bytes);
void vzip(NeonSize size, const QwNeonRegister dst, const QwNeonRegister src);
void vrev16(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src);
void vrev32(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src);
void vrev64(NeonSize size, const QwNeonRegister dst,
const QwNeonRegister src);
void vtbl(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index);
void vtbx(const DwVfpRegister dst, const NeonListOperand& list,
const DwVfpRegister index);
  // Pseudo instructions
@@ -1395,9 +1505,6 @@ class Assembler : public AssemblerBase {
  // Debugging
// Mark generator continuation.
void RecordGeneratorContinuation();
  // Mark address of a debug break slot.
  void RecordDebugBreakSlot(RelocInfo::Mode mode);
@@ -1611,6 +1718,12 @@ class Assembler : public AssemblerBase {
           (reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters);
  }
bool VfpRegisterIsAvailable(QwNeonRegister reg) {
DCHECK(reg.is_valid());
return IsEnabled(VFP32DREGS) ||
(reg.reg_code < LowDwVfpRegister::kMaxNumLowRegisters / 2);
}
 private:
  int next_buffer_check_;  // pc offset of next buffer check
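The Q-register overload above mirrors the D-register check: without the VFP32DREGS feature only 16 D registers exist, hence only q0 through q7 are encodable. A sketch of the predicate's arithmetic (names hypothetical):

static bool QRegAvailable(int q_code, bool vfp32dregs) {
  // kMaxNumLowRegisters == 16 D registers == 8 Q registers.
  return vfp32dregs || q_code < 16 / 2;
}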


@@ -33,17 +33,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
  __ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                          Condition cond);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -635,8 +624,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
  if (cc == eq) {
    {
      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
      __ Push(lhs, rhs);
      __ CallRuntime(strict() ? Runtime::kStrictEqual : Runtime::kEqual);
      __ Push(cp);
      __ Call(strict() ? isolate()->builtins()->StrictEqual()
                       : isolate()->builtins()->Equal(),
              RelocInfo::CODE_TARGET);
      __ Pop(cp);
    }
    // Turn true into 0 and false into some non-zero value.
    STATIC_ASSERT(EQUAL == 0);
@@ -805,7 +797,6 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
  SaveFPRegsMode mode = kSaveFPRegs;
  CEntryStub(isolate, 1, mode).GetCode();
  StoreBufferOverflowStub(isolate, mode).GetCode();
isolate->set_fp_stubs_generated(true);
}
@@ -2075,46 +2066,6 @@ void StringCharFromCodeGenerator::GenerateSlow(
  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
}
enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
String::Encoding encoding) {
if (FLAG_debug_code) {
// Check that destination is word aligned.
__ tst(dest, Operand(kPointerAlignmentMask));
__ Check(eq, kDestinationOfCopyNotAligned);
}
// Assumes word reads and writes are little endian.
// Nothing to do for zero characters.
Label done;
if (encoding == String::TWO_BYTE_ENCODING) {
__ add(count, count, Operand(count), SetCC);
}
Register limit = count; // Read until dest equals this.
__ add(limit, dest, Operand(count));
Label loop_entry, loop;
// Copy bytes from src to dest until dest hits limit.
__ b(&loop_entry);
__ bind(&loop);
__ ldrb(scratch, MemOperand(src, 1, PostIndex), lt);
__ strb(scratch, MemOperand(dest, 1, PostIndex));
__ bind(&loop_entry);
__ cmp(dest, Operand(limit));
__ b(lt, &loop);
__ bind(&done);
}
void StringHelper::GenerateFlatOneByteStringEquals(
    MacroAssembler* masm, Register left, Register right, Register scratch1,
    Register scratch2, Register scratch3) {
@@ -2690,84 +2641,6 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
  __ b(ne, miss);
}
// Probe the name dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found. Jump to
// the |miss| label otherwise.
// If lookup was successful |scratch2| will be equal to elements + 4 * index.
void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register scratch1,
Register scratch2) {
DCHECK(!elements.is(scratch1));
DCHECK(!elements.is(scratch2));
DCHECK(!name.is(scratch1));
DCHECK(!name.is(scratch2));
__ AssertName(name);
// Compute the capacity mask.
__ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset));
__ SmiUntag(scratch1);
__ sub(scratch1, scratch1, Operand(1));
// Generate an unrolled loop that performs a few probes before
// giving up. Measurements done on Gmail indicate that 2 probes
// cover ~93% of loads from dictionaries.
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ add(scratch2, scratch2, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
}
__ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the entry size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
// scratch2 = scratch2 * 3.
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
__ add(scratch2, elements, Operand(scratch2, LSL, 2));
__ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset));
__ cmp(name, Operand(ip));
__ b(eq, done);
}
const int spill_mask =
(lr.bit() | r6.bit() | r5.bit() | r4.bit() |
r3.bit() | r2.bit() | r1.bit() | r0.bit()) &
~(scratch1.bit() | scratch2.bit());
__ stm(db_w, sp, spill_mask);
if (name.is(r0)) {
DCHECK(!elements.is(r1));
__ Move(r1, name);
__ Move(r0, elements);
} else {
__ Move(r0, elements);
__ Move(r1, name);
}
NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
__ CallStub(&stub);
__ cmp(r0, Operand::Zero());
__ mov(scratch2, Operand(r2));
__ ldm(ia_w, sp, spill_mask);
__ b(ne, done);
__ b(eq, miss);
}
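For reference, the probe sequence the removed stub implemented, reduced to scalar arithmetic (simplified to what the comments above state; the real NameDictionary layout carries extra header fields):

#include <cstdint>
// Probe i inspects entry ((hash + probe_offset(i)) & mask); entries are
// NameDictionary::kEntrySize == 3 words wide, hence the scale by 3.
static int ProbeEntryIndex(uint32_t hash, uint32_t probe_offset,
                           uint32_t capacity_mask) {
  uint32_t masked = (hash + probe_offset) & capacity_mask;
  return static_cast<int>(masked) * 3;
}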
void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
@@ -3057,238 +2930,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
Register cached_map = scratch1;
__ ldr(cached_map,
FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
__ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ b(ne, &start_polymorphic);
// found, now call handler.
Register handler = feedback;
__ ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
Register length = scratch2;
__ bind(&start_polymorphic);
__ ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
if (!is_polymorphic) {
// If the IC could be monomorphic we have to make sure we don't go past the
// end of the feedback array.
__ cmp(length, Operand(Smi::FromInt(2)));
__ b(eq, miss);
}
Register too_far = length;
Register pointer_reg = feedback;
// +-----+------+------+-----+-----+ ... ----+
// | map | len | wm0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(length));
__ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag));
__ bind(&next_loop);
__ ldr(cached_map, MemOperand(pointer_reg));
__ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ b(ne, &prepare_next);
__ ldr(handler, MemOperand(pointer_reg, kPointerSize));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
__ bind(&prepare_next);
__ add(pointer_reg, pointer_reg, Operand(kPointerSize * 2));
__ cmp(pointer_reg, too_far);
__ b(lt, &next_loop);
// We exhausted our array of map handler pairs.
__ jmp(miss);
}
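The layout diagram above amounts to a linear scan over (weak map, handler) pairs. A scalar sketch of the removed loop's control flow (types and accessors hypothetical; cleared weak cells are glossed over):

template <typename Array, typename Map, typename Handler>
Handler* FindHandler(Array* feedback, Map* receiver_map) {
  // The pair at elements 0/1 was already checked on the monomorphic path.
  for (int i = 2; i + 1 < feedback->length(); i += 2) {
    if (feedback->weak_map_at(i) == receiver_map)
      return feedback->handler_at(i + 1);
  }
  return nullptr;  // exhausted the map/handler pairs: miss
}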
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register receiver_map, Register feedback,
Register vector, Register slot,
Register scratch, Label* compare_map,
Label* load_smi_map, Label* try_array) {
__ JumpIfSmi(receiver, load_smi_map);
__ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(compare_map);
Register cached_map = scratch;
// Move the weak map into the weak_cell register.
__ ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ cmp(cached_map, receiver_map);
__ b(ne, try_array);
Register handler = feedback;
__ add(handler, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ add(pc, handler, Operand(Code::kHeaderSize - kHeapObjectTag));
}
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
Label transition_call;
Register cached_map = scratch1;
Register too_far = scratch2;
Register pointer_reg = feedback;
__ ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
// +-----+------+------+-----+-----+-----+ ... ----+
// | map | len | wm0 | wt0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ add(too_far, feedback, Operand::PointerOffsetFromSmiKey(too_far));
__ add(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(pointer_reg, feedback,
Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag));
__ bind(&next_loop);
__ ldr(cached_map, MemOperand(pointer_reg));
__ ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ cmp(receiver_map, cached_map);
__ b(ne, &prepare_next);
// Is it a transitioning store?
__ ldr(too_far, MemOperand(pointer_reg, kPointerSize));
__ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
__ b(ne, &transition_call);
__ ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
__ add(pc, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
__ bind(&transition_call);
__ ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
__ JumpIfSmi(too_far, miss);
__ ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
__ mov(feedback, too_far);
__ add(pc, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag));
__ bind(&prepare_next);
__ add(pointer_reg, pointer_reg, Operand(kPointerSize * 3));
__ cmp(pointer_reg, too_far);
__ b(lt, &next_loop);
// We exhausted our array of map handler pairs.
__ jmp(miss);
}
void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // r1
Register key = StoreWithVectorDescriptor::NameRegister(); // r2
Register vector = StoreWithVectorDescriptor::VectorRegister(); // r3
Register slot = StoreWithVectorDescriptor::SlotRegister(); // r4
DCHECK(StoreWithVectorDescriptor::ValueRegister().is(r0)); // r0
Register feedback = r5;
Register receiver_map = r6;
Register scratch1 = r9;
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ bind(&try_array);
// Is it a fixed array?
__ ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex);
__ b(ne, &not_array);
// We have a polymorphic element handler.
Label polymorphic, try_poly_name;
__ bind(&polymorphic);
// We are using register r8, which is used for the embedded constant pool
// when FLAG_enable_embedded_constant_pool is true.
DCHECK(!FLAG_enable_embedded_constant_pool);
Register scratch2 = r8;
HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2,
&miss);
__ bind(&not_array);
// Is it generic?
__ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex);
__ b(ne, &try_poly_name);
Handle<Code> megamorphic_stub =
KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ cmp(key, feedback);
__ b(ne, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ add(feedback, vector, Operand::PointerOffsetFromSmiKey(slot));
__ ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false,
&miss);
__ bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
__ bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    ProfileEntryHookStub stub(masm->isolate());
@@ -3648,123 +3289,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
void FastNewObjectStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : target
// -- r3 : new target
// -- cp : context
// -- lr : return address
// -----------------------------------
__ AssertFunction(r1);
__ AssertReceiver(r3);
// Verify that the new target is a JSFunction.
Label new_object;
__ CompareObjectType(r3, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &new_object);
// Load the initial map and verify that it's in fact a map.
__ ldr(r2, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(r2, &new_object);
__ CompareObjectType(r2, r0, r0, MAP_TYPE);
__ b(ne, &new_object);
// Fall back to runtime if the target differs from the new target's
// initial map constructor.
__ ldr(r0, FieldMemOperand(r2, Map::kConstructorOrBackPointerOffset));
__ cmp(r0, r1);
__ b(ne, &new_object);
// Allocate the JSObject on the heap.
Label allocate, done_allocate;
__ ldrb(r4, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ Allocate(r4, r0, r5, r6, &allocate, SIZE_IN_WORDS);
__ bind(&done_allocate);
// Initialize the JSObject fields.
__ str(r2, FieldMemOperand(r0, JSObject::kMapOffset));
__ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
__ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
__ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
__ add(r1, r0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
// -- r0 : result (tagged)
// -- r1 : result fields (untagged)
// -- r5 : result end (untagged)
// -- r2 : initial map
// -- cp : context
// -- lr : return address
// -----------------------------------
// Perform in-object slack tracking if requested.
Label slack_tracking;
STATIC_ASSERT(Map::kNoSlackTracking == 0);
__ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
__ ldr(r3, FieldMemOperand(r2, Map::kBitField3Offset));
__ tst(r3, Operand(Map::ConstructionCounter::kMask));
__ b(ne, &slack_tracking);
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(r1, r5, r6);
__ Ret();
}
__ bind(&slack_tracking);
{
// Decrease generous allocation count.
STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
__ sub(r3, r3, Operand(1 << Map::ConstructionCounter::kShift));
__ str(r3, FieldMemOperand(r2, Map::kBitField3Offset));
// Initialize the in-object fields with undefined.
__ ldrb(r4, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
__ sub(r4, r5, Operand(r4, LSL, kPointerSizeLog2));
__ InitializeFieldsWithFiller(r1, r4, r6);
// Initialize the remaining (reserved) fields with one pointer filler map.
__ LoadRoot(r6, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(r1, r5, r6);
// Check if we can finalize the instance size.
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
__ tst(r3, Operand(Map::ConstructionCounter::kMask));
__ Ret(ne);
// Finalize the instance size.
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
__ Push(r0, r2);
__ CallRuntime(Runtime::kFinalizeInstanceSize);
__ Pop(r0);
}
__ Ret();
}
// Fall back to %AllocateInNewSpace.
__ bind(&allocate);
{
FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
__ mov(r4, Operand(r4, LSL, kPointerSizeLog2 + 1));
__ Push(r2, r4);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(r2);
}
__ ldrb(r5, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ add(r5, r0, Operand(r5, LSL, kPointerSizeLog2));
STATIC_ASSERT(kHeapObjectTag == 1);
__ sub(r5, r5, Operand(kHeapObjectTag));
__ b(&done_allocate);
// Fall back to %NewObject.
__ bind(&new_object);
__ Push(r1, r3);
__ TailCallRuntime(Runtime::kNewObject);
}
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- r1 : function


@@ -16,17 +16,6 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StringHelper : public AllStatic {
 public:
// Generate code for copying a large number of characters. This function
// is allowed to spend extra time setting up conditions to make copying
// faster. Copying of overlapping regions is not supported.
// Dest register ends at the position after the last character written.
static void GenerateCopyCharacters(MacroAssembler* masm,
Register dest,
Register src,
Register count,
Register scratch,
String::Encoding encoding);
  // Compares two flat one-byte strings and returns result in r0.
  static void GenerateCompareFlatOneByteStrings(
      MacroAssembler* masm, Register left, Register right, Register scratch1,
@@ -280,14 +269,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
                                     Handle<Name> name,
                                     Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register r0,
Register r1);
  bool SometimesSetsUpAFrame() override { return false; }
 private:


@@ -317,337 +317,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
#define __ ACCESS_MASM(masm)
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
Register scratch_elements = r4;
DCHECK(!AreAliased(receiver, key, value, target_map,
scratch_elements));
if (mode == TRACK_ALLOCATION_SITE) {
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(
receiver, scratch_elements, allocation_memento_found);
}
// Set transitioned map.
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
r9,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Register lr contains the return address.
Label loop, entry, convert_hole, gc_required, only_change_map, done;
Register elements = r4;
Register length = r5;
Register array = r6;
Register array_end = array;
// target_map parameter can be clobbered.
Register scratch1 = target_map;
Register scratch2 = r9;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, length, array, scratch2));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(eq, &only_change_map);
__ push(lr);
__ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
// Use lr as a temporary register.
__ mov(lr, Operand(length, LSL, 2));
__ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
__ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
__ sub(array, array, Operand(kHeapObjectTag));
// array: destination FixedDoubleArray, not tagged as heap object.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// r4: source FixedArray.
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
__ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
// Update receiver's map.
__ str(scratch2, MemOperand(array, HeapObject::kMapOffset));
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
scratch2,
kLRHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ add(scratch1, array, Operand(kHeapObjectTag));
__ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
scratch1,
scratch2,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
__ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
__ add(array_end, scratch2, Operand(length, LSL, 2));
// Repurpose registers no longer in use.
Register hole_lower = elements;
Register hole_upper = length;
__ mov(hole_lower, Operand(kHoleNanLower32));
__ mov(hole_upper, Operand(kHoleNanUpper32));
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32
// hole_upper: kHoleNanUpper32
// array_end: end of destination FixedDoubleArray, not tagged
// scratch2: begin of FixedDoubleArray element fields, not tagged
__ b(&entry);
__ bind(&only_change_map);
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
scratch2,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ b(&done);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ pop(lr);
__ b(fail);
// Convert and copy elements.
__ bind(&loop);
__ ldr(lr, MemOperand(scratch1, 4, PostIndex));
// lr: current element
__ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
// Normal smi, convert to double and store.
__ vmov(s0, lr);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, scratch2, 0);
__ add(scratch2, scratch2, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
// Restore a "smi-untagged" heap object.
__ SmiTag(lr);
__ orr(lr, lr, Operand(1));
__ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
__ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));
__ bind(&entry);
__ cmp(scratch2, array_end);
__ b(lt, &loop);
__ pop(lr);
__ bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
// Register lr contains the return address.
Label entry, loop, convert_hole, gc_required, only_change_map;
Register elements = r4;
Register array = r6;
Register length = r5;
Register scratch = r9;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, array, length, scratch));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
__ b(eq, &only_change_map);
__ push(lr);
__ Push(target_map, receiver, key, value);
__ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// elements: source FixedDoubleArray
// length: number of elements (smi-tagged)
// Allocate new FixedArray.
// Re-use value and target_map registers, as they have been saved on the
// stack.
Register array_size = value;
Register allocate_scratch = target_map;
__ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
__ add(array_size, array_size, Operand(length, LSL, 1));
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
// array: destination FixedArray, tagged as heap object
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ str(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
__ str(scratch, FieldMemOperand(array, HeapObject::kMapOffset));
__ sub(array, array, Operand(kHeapObjectTag));
// Prepare for conversion loop.
Register src_elements = elements;
Register dst_elements = target_map;
Register dst_end = length;
Register heap_number_map = scratch;
__ add(src_elements, elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
__ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
__ add(dst_end, dst_elements, Operand(length, LSL, 1));
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
// so pessimistically fill it with holes now.
Label initialization_loop, initialization_loop_entry;
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ b(&initialization_loop_entry);
__ bind(&initialization_loop);
__ str(scratch, MemOperand(dst_elements, kPointerSize, PostIndex));
__ bind(&initialization_loop_entry);
__ cmp(dst_elements, dst_end);
__ b(lt, &initialization_loop);
__ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
__ add(array, array, Operand(kHeapObjectTag));
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in src_elements to fully take advantage of
// post-indexing.
// dst_elements: begin of destination FixedArray element fields, not tagged
// src_elements: begin of source FixedDoubleArray element fields,
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
// heap_number_map: heap number map
__ b(&entry);
// Call into runtime if GC is required.
__ bind(&gc_required);
__ Pop(target_map, receiver, key, value);
__ pop(lr);
__ b(fail);
__ bind(&loop);
Register upper_bits = key;
__ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
// upper_bits: current element's upper 32 bit
// src_elements: address of next element's upper 32 bit
__ cmp(upper_bits, Operand(kHoleNanUpper32));
__ b(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
Register heap_number = receiver;
Register scratch2 = value;
__ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
&gc_required);
// heap_number: new heap number
__ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
__ Strd(scratch2, upper_bits,
FieldMemOperand(heap_number, HeapNumber::kValueOffset));
__ mov(scratch2, dst_elements);
__ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
__ RecordWrite(array,
scratch2,
heap_number,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ b(&entry);
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
__ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
__ str(scratch2, MemOperand(dst_elements, 4, PostIndex));
__ bind(&entry);
__ cmp(dst_elements, dst_end);
__ b(lt, &loop);
__ Pop(target_map, receiver, key, value);
// Replace receiver's backing store with newly created and filled FixedArray.
__ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver,
JSObject::kElementsOffset,
array,
scratch,
kLRHasBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ pop(lr);
__ bind(&only_change_map);
// Update receiver's map.
__ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
scratch,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
@@ -771,31 +440,23 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  return result;
}
void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
  Address target_address = Memory::Address_at(
      sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
  Code* stub = GetCodeFromTargetAddress(target_address);
  return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
                                Code::Age age) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    Code* stub = GetCodeAgeStub(isolate, age);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
    patcher.masm()->add(r0, pc, Operand(-8));
@@ -804,7 +465,6 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
  }
}
}  // namespace internal
}  // namespace v8


@@ -190,6 +190,7 @@ enum {
  B7 = 1 << 7,
  B8 = 1 << 8,
  B9 = 1 << 9,
  B10 = 1 << 10,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B17 = 1 << 17,
@@ -218,7 +219,6 @@ enum {
  kOff8Mask = (1 << 8) - 1
};
enum BarrierOption {
  OSHLD = 0x1,
  OSHST = 0x2,
@@ -327,12 +327,12 @@ enum LFlag {
// NEON data type
enum NeonDataType {
  NeonS8 = 0x1,   // U = 0, imm3 = 0b001
  NeonS16 = 0x2,  // U = 0, imm3 = 0b010
  NeonS32 = 0x4,  // U = 0, imm3 = 0b100
  NeonU8 = 1 << 24 | 0x1,   // U = 1, imm3 = 0b001
  NeonU16 = 1 << 24 | 0x2,  // U = 1, imm3 = 0b010
  NeonU32 = 1 << 24 | 0x4,  // U = 1, imm3 = 0b100
  NeonDataTypeSizeMask = 0x7,
  NeonDataTypeUMask = 1 << 24
};
@@ -374,10 +374,10 @@ const int32_t kDefaultStopCode = -1;
// Type of VFP register. Determines register encoding.
enum VFPRegPrecision {
  kSinglePrecision = 0,
  kDoublePrecision = 1,
  kSimd128Precision = 2
};
// VFP FPSCR constants.
enum VFPConversionMode {
  kFPSCRRounding = 0,
@@ -667,15 +667,22 @@ class Instruction {
 private:
  // Join split register codes, depending on single or double precision.
  // Join split register codes, depending on register precision.
  // four_bit is the position of the least-significant bit of the four
  // bit specifier. one_bit is the position of the additional single bit
  // specifier.
  inline int VFPGlueRegValue(VFPRegPrecision pre, int four_bit, int one_bit) {
    if (pre == kSinglePrecision) {
      return (Bits(four_bit + 3, four_bit) << 1) | Bit(one_bit);
} else {
int reg_num = (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
if (pre == kDoublePrecision) {
return reg_num;
}
DCHECK_EQ(kSimd128Precision, pre);
DCHECK_EQ(reg_num & 1, 0);
return reg_num / 2;
    }
    return (Bit(one_bit) << 4) | Bits(four_bit + 3, four_bit);
  }
  // We need to prevent the creation of instances of class Instruction.
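The new else branch above reuses the double-precision join and then halves it, because a Q register is identified by the code of its low D register (d16 pairs with d17 to form q8). A standalone sketch of the three cases, mirroring the logic above:

static int GlueRegValue(int precision, int one_bit, int four_bits) {
  if (precision == 0)                      // single: sN
    return (four_bits << 1) | one_bit;
  int reg_num = (one_bit << 4) | four_bits;
  if (precision == 1) return reg_num;      // double: dN
  return reg_num / 2;                      // simd128: reg_num names d(2N)
}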


@@ -1419,6 +1419,9 @@ int Decoder::DecodeType7(Instruction* instr) {
// Sd = vsqrt(Sm)
// vmrs
// vmsr
// Qd = vdup.size(Qd, Rt)
// vmov.size: Dd[i] = Rt
// vmov.sign.size: Rt = Dn[i]
void Decoder::DecodeTypeVFP(Instruction* instr) {
  VERIFY((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
  VERIFY(instr->Bits(11, 9) == 0x5);
@@ -1531,21 +1534,71 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
    if ((instr->VCValue() == 0x0) &&
        (instr->VAValue() == 0x0)) {
      DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr);
    } else if ((instr->VLValue() == 0x0) &&
               (instr->VCValue() == 0x1) &&
               (instr->Bit(23) == 0x0)) {
      if (instr->Bit(21) == 0x0) {
        Format(instr, "vmov'cond.32 'Dd[0], 'rt");
    } else if ((instr->VLValue() == 0x0) && (instr->VCValue() == 0x1)) {
      if (instr->Bit(23) == 0) {
        int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
        if ((opc1_opc2 & 0xb) == 0) {
          // NeonS32/NeonU32
if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'Dd[0], 'rt");
} else {
Format(instr, "vmov'cond.32 'Dd[1], 'rt");
}
} else {
int vd = instr->VFPNRegValue(kDoublePrecision);
int rt = instr->RtValue();
if ((opc1_opc2 & 0x8) != 0) {
// NeonS8 / NeonU8
int i = opc1_opc2 & 0x7;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov.8 d%d[%d], r%d", vd, i, rt);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov.16 d%d[%d], r%d", vd, i, rt);
} else {
Unknown(instr);
}
}
} else { } else {
        Format(instr, "vmov'cond.32 'Dd[1], 'rt");
        int size = 32;
if (instr->Bit(5) != 0)
size = 16;
else if (instr->Bit(22) != 0)
size = 8;
int Vd = instr->VFPNRegValue(kSimd128Precision);
int Rt = instr->RtValue();
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vdup.%i q%d, r%d", size, Vd, Rt);
} }
    } else if ((instr->VLValue() == 0x1) &&
               (instr->VCValue() == 0x1) &&
               (instr->Bit(23) == 0x0)) {
      if (instr->Bit(21) == 0x0) {
        Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
    } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x1)) {
      int opc1_opc2 = (instr->Bits(22, 21) << 2) | instr->Bits(6, 5);
      if ((opc1_opc2 & 0xb) == 0) {
        // NeonS32 / NeonU32
        if (instr->Bit(21) == 0x0) {
Format(instr, "vmov'cond.32 'rt, 'Dd[0]");
} else {
Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
}
      } else {
        Format(instr, "vmov'cond.32 'rt, 'Dd[1]");
        const char* sign = instr->Bit(23) != 0 ? "u" : "s";
int rt = instr->RtValue();
int vn = instr->VFPNRegValue(kDoublePrecision);
if ((opc1_opc2 & 0x8) != 0) {
// NeonS8 / NeonU8
int i = opc1_opc2 & 0x7;
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmov.%s8 r%d, d%d[%d]", sign, rt, vn, i);
} else if ((opc1_opc2 & 0x1) != 0) {
// NeonS16 / NeonU16
int i = (opc1_opc2 >> 1) & 0x3;
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmov.%s16 r%d, d%d[%d]",
sign, rt, vn, i);
} else {
Unknown(instr);
}
      }
    } else if ((instr->VCValue() == 0x0) &&
               (instr->VAValue() == 0x7) &&
@@ -1563,6 +1616,8 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
        Format(instr, "vmrs'cond 'rt, FPSCR");
      }
    }
} else {
Unknown(instr); // Not used by V8.
    }
  }
}
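The opc1/opc2 field assembled above as (Bits(22, 21) << 2) | Bits(6, 5) selects both the scalar lane size and the lane index. A sketch mirroring the decode order used in the code (helper hypothetical):

static int DecodeScalarLaneSize(int opc1_opc2, int* index) {
  if ((opc1_opc2 & 0xb) == 0) {          // NeonS32 / NeonU32
    *index = (opc1_opc2 >> 2) & 0x1;
    return 32;
  }
  if ((opc1_opc2 & 0x8) != 0) {          // NeonS8 / NeonU8
    *index = opc1_opc2 & 0x7;
    return 8;
  }
  if ((opc1_opc2 & 0x1) != 0) {          // NeonS16 / NeonU16
    *index = (opc1_opc2 >> 1) & 0x3;
    return 16;
  }
  return 0;  // undefined encoding; the decoder prints Unknown
}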
@@ -1801,6 +1856,104 @@ static const char* const barrier_option_names[] = {
void Decoder::DecodeSpecialCondition(Instruction* instr) {
  switch (instr->SpecialValue()) {
case 4:
if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 2 &&
instr->Bit(6) == 1 && instr->Bit(4) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
if (Vm == Vn) {
// vmov Qd, Qm
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmov q%d, q%d", Vd, Vm);
} else {
// vorr Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vorr q%d, q%d, q%d", Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 8) {
const char* op = (instr->Bit(4) == 0) ? "vadd" : "vtst";
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vadd/vtst.i<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.i%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xd && instr->Bit(4) == 0) {
const char* op = (instr->Bits(21, 20) == 0) ? "vadd" : "vsub";
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vadd/vsub.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x9 && instr->Bit(6) == 1 &&
instr->Bit(4) == 1) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vmul.i<size> Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmul.i%d q%d, q%d, q%d", size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xe && instr->Bits(21, 20) == 0 &&
instr->Bit(4) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vceq.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vceq.f32 q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
instr->Bit(6) == 1 && instr->Bit(4) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vand Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vand q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x3) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
// vcge/vcgt.s<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xf && instr->Bit(20) == 0 &&
instr->Bit(6) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
if (instr->Bit(4) == 1) {
// vrecps/vrsqrts.f32 Qd, Qm, Qn.
const char* op = instr->Bit(21) == 0 ? "vrecps" : "vrsqrts";
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else {
// vmin/max.f32 Qd, Qm, Qn.
const char* op = instr->Bit(21) == 1 ? "vmin" : "vmax";
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 0x6) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vmin/vmax.s<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.s%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
break;
    case 5:
      if ((instr->Bits(18, 16) == 0) && (instr->Bits(11, 6) == 0x28) &&
          (instr->Bit(4) == 1)) {
@@ -1811,6 +1964,96 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
        int imm3 = instr->Bits(21, 19);
        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                    "vmovl.s%d q%d, d%d", imm3*8, Vd, Vm);
} else if (instr->Bits(21, 20) == 3 && instr->Bit(4) == 0) {
// vext.8 Qd, Qm, Qn, imm4
int imm4 = instr->Bits(11, 8);
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vext.8 q%d, q%d, q%d, #%d",
Vd, Vn, Vm, imm4);
} else {
Unknown(instr);
}
break;
case 6:
if (instr->Bits(11, 8) == 8) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
if (instr->Bit(4) == 0) {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vsub.i%d q%d, q%d, q%d",
size, Vd, Vn, Vm);
} else {
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vceq.i%d q%d, q%d, q%d",
size, Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 1 &&
instr->Bit(4) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vbsl q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 1 && instr->Bits(21, 20) == 0 &&
instr->Bit(4) == 1) {
if (instr->Bit(6) == 0) {
// veor Dd, Dn, Dm
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vn = instr->VFPNRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"veor d%d, d%d, d%d", Vd, Vn, Vm);
} else {
// veor Qd, Qn, Qm
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"veor q%d, q%d, q%d", Vd, Vn, Vm);
}
} else if (instr->Bits(11, 8) == 0xd && instr->Bit(21) == 0 &&
instr->Bit(6) == 1 && instr->Bit(4) == 1) {
// vmul.f32 Qd, Qn, Qm
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmul.f32 q%d, q%d, q%d", Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0xe && instr->Bit(20) == 0 &&
instr->Bit(4) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
const char* op = (instr->Bit(21) == 0) ? "vcge" : "vcgt";
// vcge/vcgt.f32 Qd, Qm, Qn.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d, q%d", op, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x3) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
const char* op = (instr->Bit(4) == 1) ? "vcge" : "vcgt";
// vcge/vcgt.u<size> Qd, Qm, Qn.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else if (instr->Bits(11, 8) == 0x6) {
int size = kBitsPerByte * (1 << instr->Bits(21, 20));
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int Vn = instr->VFPNRegValue(kSimd128Precision);
// vmin/vmax.u<size> Qd, Qm, Qn.
const char* op = instr->Bit(4) == 1 ? "vmin" : "vmax";
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s.u%d q%d, q%d, q%d", op,
size, Vd, Vn, Vm);
} else {
Unknown(instr);
}
@ -1825,13 +2068,109 @@ void Decoder::DecodeSpecialCondition(Instruction* instr) {
int imm3 = instr->Bits(21, 19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vmovl.u%d q%d, d%d", imm3*8, Vd, Vm);
} else if (instr->Opc1Value() == 7 && instr->Bits(21, 20) == 0x3 &&
instr->Bit(4) == 0) {
if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 7) == 0) {
if (instr->Bit(6) == 0) {
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vswp d%d, d%d", Vd, Vm);
} else {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vswp q%d, q%d", Vd, Vm);
}
} else if (instr->Bits(11, 7) == 0x18) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
int index = instr->Bit(19);
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vdup q%d, d%d[%d]", Vd, Vm, index);
} else if (instr->Bits(19, 16) == 0 && instr->Bits(11, 6) == 0x17) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vmvn q%d, q%d", Vd, Vm);
} else if (instr->Bits(19, 16) == 0xB && instr->Bits(11, 9) == 0x3 &&
instr->Bit(6) == 1) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
const char* suffix = nullptr;
int op = instr->Bits(8, 7);
switch (op) {
case 0:
suffix = "f32.s32";
break;
case 1:
suffix = "f32.u32";
break;
case 2:
suffix = "s32.f32";
break;
case 3:
suffix = "u32.f32";
break;
}
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vcvt.%s q%d, q%d", suffix, Vd, Vm);
} else if (instr->Bits(11, 10) == 0x2) {
int Vd = instr->VFPDRegValue(kDoublePrecision);
int Vn = instr->VFPNRegValue(kDoublePrecision);
int Vm = instr->VFPMRegValue(kDoublePrecision);
int len = instr->Bits(9, 8);
NeonListOperand list(DwVfpRegister::from_code(Vn), len + 1);
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "%s d%d, ",
instr->Bit(6) == 0 ? "vtbl.8" : "vtbx.8", Vd);
FormatNeonList(Vn, list.type());
Print(", ");
PrintDRegister(Vm);
} else if (instr->Bits(17, 16) == 0x2 && instr->Bits(11, 6) == 0x7) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
// vzip.<size> Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vzip.%d q%d, q%d", size, Vd, Vm);
} else if (instr->Bits(17, 16) == 0 && instr->Bits(11, 9) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
int op = kBitsPerByte
<< (static_cast<int>(Neon64) - instr->Bits(8, 7));
// vrev<op>.<size> Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"vrev%d.%d q%d, q%d", op, size, Vd, Vm);
} else if (instr->Bits(17, 16) == 0x1 && instr->Bit(11) == 0) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
int size = kBitsPerByte * (1 << instr->Bits(19, 18));
const char* type = instr->Bit(10) != 0 ? "f" : "s";
if (instr->Bits(9, 6) == 0xd) {
// vabs<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vabs.%s%d q%d, q%d",
type, size, Vd, Vm);
} else if (instr->Bits(9, 6) == 0xf) {
// vneg<type>.<size> Qd, Qm.
out_buffer_pos_ +=
SNPrintF(out_buffer_ + out_buffer_pos_, "vneg.%s%d q%d, q%d",
type, size, Vd, Vm);
} else {
Unknown(instr);
}
} else if (instr->Bits(19, 18) == 0x2 && instr->Bits(11, 8) == 0x5) {
int Vd = instr->VFPDRegValue(kSimd128Precision);
int Vm = instr->VFPMRegValue(kSimd128Precision);
const char* op = instr->Bit(7) == 0 ? "vrecpe" : "vrsqrte";
// vrecpe/vrsqrte.f32 Qd, Qm.
out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
"%s.f32 q%d, q%d", op, Vd, Vm);
} else {
Unknown(instr);
}
} else {
Unknown(instr);
}

View File

@ -66,13 +66,7 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r2, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r1, r3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}

View File

@ -264,6 +264,35 @@ void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src,
}
}
void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) {
if (!dst.is(src)) {
vmov(dst, src);
}
}
void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
if (srcdst0.is(srcdst1)) return; // Swapping aliased registers emits nothing.
DCHECK(VfpRegisterIsAvailable(srcdst0));
DCHECK(VfpRegisterIsAvailable(srcdst1));
if (CpuFeatures::IsSupported(NEON)) {
vswp(srcdst0, srcdst1);
} else {
DCHECK(!srcdst0.is(kScratchDoubleReg));
DCHECK(!srcdst1.is(kScratchDoubleReg));
vmov(kScratchDoubleReg, srcdst0);
vmov(srcdst0, srcdst1);
vmov(srcdst1, kScratchDoubleReg);
}
}
void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) {
if (!srcdst0.is(srcdst1)) {
vswp(srcdst0, srcdst1);
}
}
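The non-NEON path in Swap above is the classic three-move exchange through a scratch register; a standalone scalar analogue (illustrative only, not V8 code) shows why the scratch must not alias either operand:

void SwapViaScratch(double* a, double* b, double* scratch) {
  // Mirrors vmov(kScratchDoubleReg, srcdst0); vmov(srcdst0, srcdst1);
  // vmov(srcdst1, kScratchDoubleReg). If scratch aliased a or b, the
  // first move would be clobbered and one value lost.
  *scratch = *a;
  *a = *b;
  *b = *scratch;
}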
void MacroAssembler::Mls(Register dst, Register src1, Register src2,
Register srcA, Condition cond) {
if (CpuFeatures::IsSupported(ARMv7)) {
@ -1052,8 +1081,8 @@ void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) {
}
void MacroAssembler::VmovExtended(Register dst, int src_code) {
DCHECK_LE(SwVfpRegister::kMaxNumRegisters, src_code);
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
if (src_code & 0x1) {
VmovHigh(dst, DwVfpRegister::from_code(src_code / 2));
} else {
@ -1062,8 +1091,8 @@ void MacroAssembler::VmovExtended(Register dst, int src_code) {
}
void MacroAssembler::VmovExtended(int dst_code, Register src) {
DCHECK_LE(SwVfpRegister::kMaxNumRegisters, dst_code);
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
if (dst_code & 0x1) {
VmovHigh(DwVfpRegister::from_code(dst_code / 2), src);
} else {
@ -1073,22 +1102,23 @@ void MacroAssembler::VmovExtended(int dst_code, Register src) {
void MacroAssembler::VmovExtended(int dst_code, int src_code,
Register scratch) {
if (src_code < SwVfpRegister::kMaxNumRegisters &&
dst_code < SwVfpRegister::kMaxNumRegisters) {
// src and dst are both s-registers.
vmov(SwVfpRegister::from_code(dst_code),
SwVfpRegister::from_code(src_code));
} else if (src_code < SwVfpRegister::kMaxNumRegisters) {
// src is an s-register.
vmov(scratch, SwVfpRegister::from_code(src_code));
VmovExtended(dst_code, scratch);
} else if (dst_code < SwVfpRegister::kMaxNumRegisters) {
// dst is an s-register.
VmovExtended(scratch, src_code);
vmov(SwVfpRegister::from_code(dst_code), scratch);
} else {
// Neither src nor dst is an s-register.
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, src_code);
DCHECK_GT(SwVfpRegister::kMaxNumRegisters * 2, dst_code);
VmovExtended(scratch, src_code);
VmovExtended(dst_code, scratch);
}
@ -1096,7 +1126,7 @@ void MacroAssembler::VmovExtended(int dst_code, int src_code,
void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
Register scratch) {
if (dst_code >= SwVfpRegister::kMaxNumRegisters) {
ldr(scratch, src);
VmovExtended(dst_code, scratch);
} else {
@ -1106,7 +1136,7 @@ void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src,
void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
Register scratch) {
if (src_code >= SwVfpRegister::kMaxNumRegisters) {
VmovExtended(scratch, src_code);
str(scratch, dst);
} else {
@ -1114,6 +1144,105 @@ void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code,
}
}
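The kMaxNumRegisters rewrite above makes the extended-code convention explicit: codes 0-31 name s0-s31, while codes 32-63 address the low/high words of d16-d31, which have no s-register aliases. A small worked example of the mapping (standalone sketch, not V8 code):

#include <cassert>

int main() {
  const int kMaxNumSRegisters = 32;    // SwVfpRegister::kMaxNumRegisters
  int code = 33;                       // first odd code past the s-registers
  assert(code >= kMaxNumSRegisters && code < kMaxNumSRegisters * 2);
  int d_reg = code / 2;                // maps to d16
  bool high_half = (code & 0x1) != 0;  // odd code -> the VmovHigh path
  assert(d_reg == 16 && high_half);
  return 0;
}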
void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src,
NeonDataType dt, int lane) {
int bytes_per_lane = dt & NeonDataTypeSizeMask; // 1, 2, 4
int log2_bytes_per_lane = bytes_per_lane / 2; // 0, 1, 2
int byte = lane << log2_bytes_per_lane;
int double_word = byte >> kDoubleSizeLog2;
int double_byte = byte & (kDoubleSize - 1);
int double_lane = double_byte >> log2_bytes_per_lane;
DwVfpRegister double_source =
DwVfpRegister::from_code(src.code() * 2 + double_word);
vmov(dt, dst, double_source, double_lane);
}
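The index arithmetic in ExtractLane splits a q-register lane into a d-register index plus a lane within it. A worked example for a hypothetical 16-bit lane type (2 bytes per lane, so the bytes_per_lane / 2 shortcut really yields the log2 for sizes 1, 2, 4): lane 5 of the q-register is lane 1 of its upper d-register. Standalone sketch, not V8 code:

#include <cassert>

int main() {
  const int kDoubleSizeLog2 = 3;  // a d-register holds 8 bytes
  const int kDoubleSize = 8;
  int bytes_per_lane = 2;                        // 16-bit lanes
  int log2_bytes_per_lane = bytes_per_lane / 2;  // 1
  int lane = 5;
  int byte = lane << log2_bytes_per_lane;        // byte offset 10
  int double_word = byte >> kDoubleSizeLog2;     // upper d-register (index 1)
  int double_byte = byte & (kDoubleSize - 1);    // byte 2 within it
  int double_lane = double_byte >> log2_bytes_per_lane;  // lane 1
  assert(double_word == 1 && double_lane == 1);
  return 0;
}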
void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src,
Register scratch, int lane) {
int s_code = src.code() * 4 + lane;
VmovExtended(dst.code(), s_code, scratch);
}
void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
Register src_lane, NeonDataType dt, int lane) {
Move(dst, src);
int bytes_per_lane = dt & NeonDataTypeSizeMask; // 1, 2, 4
int log2_bytes_per_lane = bytes_per_lane / 2; // 0, 1, 2
int byte = lane << log2_bytes_per_lane;
int double_word = byte >> kDoubleSizeLog2;
int double_byte = byte & (kDoubleSize - 1);
int double_lane = double_byte >> log2_bytes_per_lane;
DwVfpRegister double_dst =
DwVfpRegister::from_code(dst.code() * 2 + double_word);
vmov(dt, double_dst, double_lane, src_lane);
}
void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
SwVfpRegister src_lane, Register scratch,
int lane) {
Move(dst, src);
int s_code = dst.code() * 4 + lane;
VmovExtended(s_code, src_lane.code(), scratch);
}
void MacroAssembler::Swizzle(QwNeonRegister dst, QwNeonRegister src,
Register scratch, NeonSize size, uint32_t lanes) {
// TODO(bbudge) Handle Int16x8, Int8x16 vectors.
DCHECK_EQ(Neon32, size);
DCHECK_IMPLIES(size == Neon32, lanes < 0xFFFFu);
if (size == Neon32) {
switch (lanes) {
// TODO(bbudge) Handle more special cases.
case 0x3210: // Identity.
Move(dst, src);
return;
case 0x1032: // Swap top and bottom.
vext(dst, src, src, 8);
return;
case 0x2103: // Rotation.
vext(dst, src, src, 12);
return;
case 0x0321: // Rotation.
vext(dst, src, src, 4);
return;
case 0x0000: // Equivalent to vdup.
case 0x1111:
case 0x2222:
case 0x3333: {
int lane_code = src.code() * 4 + (lanes & 0xF);
if (lane_code >= SwVfpRegister::kMaxNumRegisters) {
// TODO(bbudge) use vdup (vdup.32 dst, D<src>[lane]) once implemented.
int temp_code = kScratchDoubleReg.code() * 2;
VmovExtended(temp_code, lane_code, scratch);
lane_code = temp_code;
}
vdup(dst, SwVfpRegister::from_code(lane_code));
return;
}
case 0x2301: // Swap lanes 0, 1 and lanes 2, 3.
vrev64(Neon32, dst, src);
return;
default: // Handle all other cases with vmovs.
int src_code = src.code() * 4;
int dst_code = dst.code() * 4;
bool in_place = src.is(dst);
if (in_place) {
vmov(kScratchQuadReg, src);
src_code = kScratchQuadReg.code() * 4;
}
for (int i = 0; i < 4; i++) {
int lane = (lanes >> (i * 4) & 0xF);
VmovExtended(dst_code + i, src_code + lane, scratch);
}
if (in_place) {
// Restore zero reg.
veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
}
return;
}
}
}
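The lanes parameter of Swizzle packs one source-lane index per hex nibble, low nibble first, which is why 0x3210 is the identity and 0x2301 matches the vrev64.32 case above. A standalone decoding example (illustrative, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t lanes = 0x2301;  // the case handled with vrev64(Neon32, ...)
  int src_for_dst[4];
  for (int i = 0; i < 4; i++) {
    src_for_dst[i] = (lanes >> (i * 4)) & 0xF;  // nibble i -> dest lane i
  }
  // Destination lanes 0..3 read source lanes 1, 0, 3, 2: adjacent pairs swap.
  assert(src_for_dst[0] == 1 && src_for_dst[1] == 0);
  assert(src_for_dst[2] == 3 && src_for_dst[3] == 2);
  return 0;
}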
void MacroAssembler::LslPair(Register dst_low, Register dst_high,
Register src_low, Register src_high,
Register scratch, Register shift) {
@ -1629,18 +1758,16 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
}
}
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
mov(r4, Operand(debug_hook_active));
ldrsb(r4, MemOperand(r4));
cmp(r4, Operand(0));
b(eq, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@ -1657,7 +1784,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@ -1671,7 +1798,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
bind(&skip_hook);
}
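CheckDebugHook's fast path is a single sign-extending byte load plus a compare against zero; only when the debugger has set the flag does it spill state and call into the runtime. A C-style sketch of the control flow (illustrative only, not the generated code):

void CheckDebugHookSketch(const signed char* debug_hook_flag,
                          void (*call_debug_runtime)()) {
  // ldrsb r4, [flag]; cmp r4, #0; beq skip_hook
  if (*debug_hook_flag != 0) {
    call_debug_runtime();  // CallRuntime(Runtime::kDebugOnFunctionCall)
  }
}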
@ -1685,8 +1812,8 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(r1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r3));
if (call_wrapper.NeedsDebugHookCheck()) {
CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
@ -2177,112 +2304,6 @@ void MacroAssembler::FastAllocate(int object_size, Register result,
add(result, result, Operand(kHeapObjectTag));
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
add(scratch1, scratch1,
Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate two-byte string in new space.
Allocate(scratch1, result, scratch2, scratch3, gc_required,
NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
length,
Heap::kStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
DCHECK(kCharSize == 1);
add(scratch1, length,
Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate one-byte string in new space.
Allocate(scratch1, result, scratch2, scratch3, gc_required,
NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
scratch1, scratch2);
}
void MacroAssembler::AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kConsStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
}
void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kSlicedStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
}
void MacroAssembler::CompareObjectType(Register object,
Register map,
Register type_reg,
@ -2314,68 +2335,6 @@ void MacroAssembler::CompareRoot(Register obj,
cmp(obj, ip);
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(ls, fail);
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
b(hi, fail);
}
void MacroAssembler::CheckFastSmiElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
b(hi, fail);
}
void MacroAssembler::StoreNumberToDoubleElements(
Register value_reg,
Register key_reg,
Register elements_reg,
Register scratch1,
LowDwVfpRegister double_scratch,
Label* fail,
int elements_offset) {
DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label smi_value, store;
// Handle smi values specially.
JumpIfSmi(value_reg, &smi_value);
// Ensure that the object is a heap number
CheckMap(value_reg,
scratch1,
isolate()->factory()->heap_number_map(),
fail,
DONT_DO_SMI_CHECK);
vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
VFPCanonicalizeNaN(double_scratch);
b(&store);
bind(&smi_value);
SmiToDouble(double_scratch, value_reg);
bind(&store);
add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
vstr(double_scratch,
FieldMemOperand(scratch1,
FixedDoubleArray::kHeaderSize - elements_offset));
}
void MacroAssembler::CompareMap(Register obj,
Register scratch,
Handle<Map> map,
@ -2878,28 +2837,6 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match) {
DCHECK(IsFastElementsKind(expected_kind));
DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
ldr(scratch, NativeContextMemOperand());
ldr(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
cmp(map_in_out, ip);
b(ne, no_map_match);
// Use the transitioned cached map.
ldr(map_in_out,
ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
ldr(dst, NativeContextMemOperand());
ldr(dst, ContextMemOperand(dst, index));
@ -2962,15 +2899,6 @@ void MacroAssembler::UntagAndJumpIfSmi(
b(cc, smi_case); // Shifter carry is not set for a smi.
}
void MacroAssembler::UntagAndJumpIfNotSmi(
Register dst, Register src, Label* non_smi_case) {
STATIC_ASSERT(kSmiTag == 0);
SmiUntag(dst, src, SetCC);
b(cs, non_smi_case); // Shifter carry is set for a non-smi.
}
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
@ -3411,19 +3339,6 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
b(ne, failure);
}
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
Register scratch,
Label* failure) {
const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
and_(scratch, type, Operand(kFlatOneByteStringMask));
cmp(scratch, Operand(kFlatOneByteStringTag));
b(ne, failure);
}
static const int kRegisterPassedArguments = 4;
@ -3861,45 +3776,6 @@ Register GetRegisterThatIsNotOneOf(Register reg1,
return no_reg;
}
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register object,
Register scratch0,
Register scratch1,
Label* found) {
DCHECK(!scratch1.is(scratch0));
Register current = scratch0;
Label loop_again, end;
// scratch contained elements pointer.
mov(current, object);
ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
b(eq, &end);
// Loop based on the map going up the prototype chain.
bind(&loop_again);
ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
ldrb(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
cmp(scratch1, Operand(JS_OBJECT_TYPE));
b(lo, found);
ldr(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
cmp(scratch1, Operand(DICTIONARY_ELEMENTS));
b(eq, found);
ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareRoot(current, Heap::kNullValueRootIndex);
b(ne, &loop_again);
bind(&end);
}
#ifdef DEBUG
bool AreAliased(Register reg1,
Register reg2,

View File

@ -184,6 +184,10 @@ class MacroAssembler: public Assembler {
}
void Move(SwVfpRegister dst, SwVfpRegister src, Condition cond = al);
void Move(DwVfpRegister dst, DwVfpRegister src, Condition cond = al);
void Move(QwNeonRegister dst, QwNeonRegister src);
// Register swap.
void Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1);
void Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1);
void Load(Register dst, const MemOperand& src, Representation r);
void Store(Register src, const MemOperand& dst, Representation r);
@ -557,6 +561,16 @@ class MacroAssembler: public Assembler {
void VmovExtended(int dst_code, const MemOperand& src, Register scratch);
void VmovExtended(const MemOperand& dst, int src_code, Register scratch);
void ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane);
void ExtractLane(SwVfpRegister dst, QwNeonRegister src, Register scratch,
int lane);
void ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane,
NeonDataType dt, int lane);
void ReplaceLane(QwNeonRegister dst, QwNeonRegister src,
SwVfpRegister src_lane, Register scratch, int lane);
void Swizzle(QwNeonRegister dst, QwNeonRegister src, Register scratch,
NeonSize size, uint32_t lanes);
void LslPair(Register dst_low, Register dst_high, Register src_low,
Register src_high, Register scratch, Register shift);
void LslPair(Register dst_low, Register dst_high, Register src_low,
@ -635,17 +649,6 @@ class MacroAssembler: public Assembler {
LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
}
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch,
Label* no_map_match);
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers
@ -678,9 +681,10 @@ class MacroAssembler: public Assembler {
const ParameterCount& actual, InvokeFlag flag,
const CallWrapper& call_wrapper);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
@ -794,32 +798,6 @@ class MacroAssembler: public Assembler {
void FastAllocate(Register object_size, Register result, Register result_end,
Register scratch, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateOneByteConsString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
@ -884,29 +862,6 @@ class MacroAssembler: public Assembler {
Register type_reg,
InstanceType type);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map,
Register scratch,
Label* fail);
// Check if a map for a JSObject indicates that the object has fast smi only
// elements. Jump to the specified label if it does not.
void CheckFastSmiElements(Register map,
Register scratch,
Label* fail);
// Check to see if maybe_number can be stored as a double in
// FastDoubleElements. If it can, store it at the index specified by key in
// the FastDoubleElements array elements. Otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register elements_reg,
Register scratch1,
LowDwVfpRegister double_scratch,
Label* fail,
int elements_offset = 0);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
// set with result of map compare. If multiple map compares are required, the
@ -1287,10 +1242,6 @@ class MacroAssembler: public Assembler {
// Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
// Untag the source value into destination and jump if source is not a smi.
// Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
// Test if the register contains a smi (Z == 0 (eq) if true).
inline void SmiTst(Register value) {
tst(value, Operand(kSmiTagMask));
@ -1380,11 +1331,6 @@ class MacroAssembler: public Assembler {
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
// Check if instance type is sequential one-byte string and jump to label if
// it is not.
void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
Label* failure);
void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string,
@ -1464,20 +1410,6 @@ class MacroAssembler: public Assembler {
Register scratch_reg,
Label* no_memento_found);
void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
Register scratch_reg,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
&no_memento_found);
b(eq, memento_found);
bind(&no_memento_found);
}
// Jumps to found label if a prototype map has dictionary elements.
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
// Loads the constant pool pointer (pp) register.
void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
Register code_target_address);

File diff suppressed because it is too large

View File

@ -151,10 +151,11 @@ class Simulator {
void set_d_register(int dreg, const uint64_t* value);
void get_d_register(int dreg, uint32_t* value);
void set_d_register(int dreg, const uint32_t* value);
// Support for NEON.
template <typename T>
void get_q_register(int qreg, T* value);
template <typename T>
void set_q_register(int qreg, const T* value);
void set_s_register(int reg, unsigned int value);
unsigned int get_s_register(int reg) const;
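Folding the four fixed-type q-register accessors into templates keeps call sites unchanged while deducing the lane type from the out-parameter. A usage sketch, assuming the buffer always spans the full 16-byte register (for example, four uint32_t lanes):

#include <cstdint>

template <typename Simulator>
void RoundTripQ0(Simulator* sim) {
  uint32_t lanes[4];
  sim->get_q_register(0, lanes);  // read q0 as 4 x 32-bit lanes
  lanes[0] = 42;
  sim->set_q_register(0, lanes);  // write the modified lanes back
}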
@ -339,6 +340,8 @@ class Simulator {
void DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(Instruction* instr);
void DecodeVCMP(Instruction* instr);
void DecodeVCVTBetweenDoubleAndSingle(Instruction* instr);
int32_t ConvertDoubleToInt(double val, bool unsigned_integer,
VFPRoundingMode mode);
void DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr);
// Executes one instruction.

View File

@ -194,13 +194,18 @@ Address RelocInfo::wasm_global_reference() {
return Memory::Address_at(Assembler::target_pointer_address_at(pc_));
}
uint32_t RelocInfo::wasm_function_table_size_reference() {
DCHECK(IsWasmFunctionTableSizeReference(rmode_));
return Memory::uint32_at(Assembler::target_pointer_address_at(pc_));
}
void RelocInfo::unchecked_update_wasm_memory_reference(
Address address, ICacheFlushMode flush_mode) {
Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}
void RelocInfo::unchecked_update_wasm_size(uint32_t size,
ICacheFlushMode flush_mode) {
Memory::uint32_at(Assembler::target_pointer_address_at(pc_)) = size;
}
@ -2950,15 +2955,13 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
(rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
(rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) ||
(rmode == RelocInfo::DEOPT_INLINING_ID) ||
(rmode == RelocInfo::DEOPT_REASON) || (rmode == RelocInfo::DEOPT_ID)) {
// Adjust code for new modes.
DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsDeoptId(rmode) ||
RelocInfo::IsDeoptPosition(rmode) ||
RelocInfo::IsInternalReference(rmode) ||
RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode));
// These modes do not need an entry in the constant pool.
} else {
constpool_.RecordEntry(data, rmode);

View File

@ -938,9 +938,6 @@ class Assembler : public AssemblerBase {
int buffer_space() const;
// Mark generator continuation.
void RecordGeneratorContinuation();
// Mark address of a debug break slot.
void RecordDebugBreakSlot(RelocInfo::Mode mode);

View File

@ -33,17 +33,6 @@ void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kNewArray);
}
void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
void FastFunctionBindStub::InitializeDescriptor(
CodeStubDescriptor* descriptor) {
Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
descriptor->Initialize(x0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
}
void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
ExternalReference miss) {
// Update the static counter each time a new code stub is generated.
@ -590,8 +579,11 @@ void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
if (cond == eq) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(cp);
__ Call(strict() ? isolate()->builtins()->StrictEqual()
: isolate()->builtins()->Equal(),
RelocInfo::CODE_TARGET);
__ Pop(cp);
}
// Turn true into 0 and false into some non-zero value.
STATIC_ASSERT(EQUAL == 0);
@ -2980,234 +2972,6 @@ void CallICTrampolineStub::Generate(MacroAssembler* masm) {
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
static void HandleArrayCases(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, bool is_polymorphic,
Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label load_smi_map, compare_map;
Label start_polymorphic;
Register cached_map = scratch1;
__ Ldr(cached_map,
FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0)));
__ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ Cmp(receiver_map, cached_map);
__ B(ne, &start_polymorphic);
// found, now call handler.
Register handler = feedback;
__ Ldr(handler, FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1)));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(feedback);
Register length = scratch2;
__ Bind(&start_polymorphic);
__ Ldr(length, FieldMemOperand(feedback, FixedArray::kLengthOffset));
if (!is_polymorphic) {
__ Cmp(length, Operand(Smi::FromInt(2)));
__ B(eq, miss);
}
Register too_far = length;
Register pointer_reg = feedback;
// +-----+------+------+-----+-----+ ... ----+
// | map | len | wm0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ Add(too_far, feedback,
Operand::UntagSmiAndScale(length, kPointerSizeLog2));
__ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(pointer_reg, feedback,
FixedArray::OffsetOfElementAt(2) - kHeapObjectTag);
__ Bind(&next_loop);
__ Ldr(cached_map, MemOperand(pointer_reg));
__ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ Cmp(receiver_map, cached_map);
__ B(ne, &prepare_next);
__ Ldr(handler, MemOperand(pointer_reg, kPointerSize));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(handler);
__ Bind(&prepare_next);
__ Add(pointer_reg, pointer_reg, kPointerSize * 2);
__ Cmp(pointer_reg, too_far);
__ B(lt, &next_loop);
// We exhausted our array of map handler pairs.
__ jmp(miss);
}
static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver,
Register receiver_map, Register feedback,
Register vector, Register slot,
Register scratch, Label* compare_map,
Label* load_smi_map, Label* try_array) {
__ JumpIfSmi(receiver, load_smi_map);
__ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ bind(compare_map);
Register cached_map = scratch;
// Move the weak map into the weak_cell register.
__ Ldr(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset));
__ Cmp(cached_map, receiver_map);
__ B(ne, try_array);
Register handler = feedback;
__ Add(handler, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(handler,
FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize));
__ Add(handler, handler, Code::kHeaderSize - kHeapObjectTag);
__ Jump(handler);
}
void KeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) {
__ EmitLoadTypeFeedbackVector(StoreWithVectorDescriptor::VectorRegister());
KeyedStoreICStub stub(isolate(), state());
stub.GenerateForTrampoline(masm);
}
void KeyedStoreICStub::Generate(MacroAssembler* masm) {
GenerateImpl(masm, false);
}
void KeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) {
GenerateImpl(masm, true);
}
static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback,
Register receiver_map, Register scratch1,
Register scratch2, Label* miss) {
// feedback initially contains the feedback array
Label next_loop, prepare_next;
Label start_polymorphic;
Label transition_call;
Register cached_map = scratch1;
Register too_far = scratch2;
Register pointer_reg = feedback;
__ Ldr(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset));
// +-----+------+------+-----+-----+-----+ ... ----+
// | map | len | wm0 | wt0 | h0 | wm1 | hN |
// +-----+------+------+-----+-----+ ----+ ... ----+
// 0 1 2 len-1
// ^ ^
// | |
// pointer_reg too_far
// aka feedback scratch2
// also need receiver_map
// use cached_map (scratch1) to look in the weak map values.
__ Add(too_far, feedback,
Operand::UntagSmiAndScale(too_far, kPointerSizeLog2));
__ Add(too_far, too_far, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(pointer_reg, feedback,
FixedArray::OffsetOfElementAt(0) - kHeapObjectTag);
__ Bind(&next_loop);
__ Ldr(cached_map, MemOperand(pointer_reg));
__ Ldr(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset));
__ Cmp(receiver_map, cached_map);
__ B(ne, &prepare_next);
// Is it a transitioning store?
__ Ldr(too_far, MemOperand(pointer_reg, kPointerSize));
__ CompareRoot(too_far, Heap::kUndefinedValueRootIndex);
__ B(ne, &transition_call);
__ Ldr(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2));
__ Add(pointer_reg, pointer_reg, Code::kHeaderSize - kHeapObjectTag);
__ Jump(pointer_reg);
__ Bind(&transition_call);
__ Ldr(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset));
__ JumpIfSmi(too_far, miss);
__ Ldr(receiver_map, MemOperand(pointer_reg, kPointerSize * 2));
// Load the map into the correct register.
DCHECK(feedback.is(StoreTransitionDescriptor::MapRegister()));
__ mov(feedback, too_far);
__ Add(receiver_map, receiver_map, Code::kHeaderSize - kHeapObjectTag);
__ Jump(receiver_map);
__ Bind(&prepare_next);
__ Add(pointer_reg, pointer_reg, kPointerSize * 3);
__ Cmp(pointer_reg, too_far);
__ B(lt, &next_loop);
// We exhausted our array of map handler pairs.
__ jmp(miss);
}
void KeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) {
Register receiver = StoreWithVectorDescriptor::ReceiverRegister(); // x1
Register key = StoreWithVectorDescriptor::NameRegister(); // x2
Register vector = StoreWithVectorDescriptor::VectorRegister(); // x3
Register slot = StoreWithVectorDescriptor::SlotRegister(); // x4
DCHECK(StoreWithVectorDescriptor::ValueRegister().is(x0)); // x0
Register feedback = x5;
Register receiver_map = x6;
Register scratch1 = x7;
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize));
// Try to quickly handle the monomorphic case without knowing for sure
// if we have a weak cell in feedback. We do know it's safe to look
// at WeakCell::kValueOffset.
Label try_array, load_smi_map, compare_map;
Label not_array, miss;
HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot,
scratch1, &compare_map, &load_smi_map, &try_array);
__ Bind(&try_array);
// Is it a fixed array?
__ Ldr(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset));
__ JumpIfNotRoot(scratch1, Heap::kFixedArrayMapRootIndex, &not_array);
// We have a polymorphic element handler.
Label try_poly_name;
HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, x8, &miss);
__ Bind(&not_array);
// Is it generic?
__ JumpIfNotRoot(feedback, Heap::kmegamorphic_symbolRootIndex,
&try_poly_name);
Handle<Code> megamorphic_stub =
KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState());
__ Jump(megamorphic_stub, RelocInfo::CODE_TARGET);
__ Bind(&try_poly_name);
// We might have a name in feedback, and a fixed array in the next slot.
__ Cmp(key, feedback);
__ B(ne, &miss);
// If the name comparison succeeded, we know we have a fixed array with
// at least one map/handler pair.
__ Add(feedback, vector, Operand::UntagSmiAndScale(slot, kPointerSizeLog2));
__ Ldr(feedback,
FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize));
HandleArrayCases(masm, feedback, receiver_map, scratch1, x8, false, &miss);
__ Bind(&miss);
KeyedStoreIC::GenerateMiss(masm);
__ Bind(&load_smi_map);
__ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex);
__ jmp(&compare_map);
}
// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
// a "Push lr" instruction, followed by a call.
static const unsigned int kProfileEntryHookCallSize =
@ -3309,91 +3073,6 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
__ Blr(lr);
}
// Probe the name dictionary in the 'elements' register.
// Jump to the 'done' label if a property with the given name is found.
// Jump to the 'miss' label otherwise.
//
// If lookup was successful 'scratch2' will be equal to elements + 4 * index.
// 'elements' and 'name' registers are preserved on miss.
void NameDictionaryLookupStub::GeneratePositiveLookup(
MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register scratch1,
Register scratch2) {
DCHECK(!AreAliased(elements, name, scratch1, scratch2));
// Assert that name contains a string.
__ AssertName(name);
// Compute the capacity mask.
__ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
__ Sub(scratch1, scratch1, 1);
// Generate an unrolled loop that performs a few probes before giving up.
for (int i = 0; i < kInlinedProbes; i++) {
// Compute the masked index: (hash + i + i * i) & mask.
__ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
if (i > 0) {
// Add the probe offset (i + i * i) left shifted to avoid right shifting
// the hash in a separate instruction. The value hash + i + i * i is right
// shifted in the following and instruction.
DCHECK(NameDictionary::GetProbeOffset(i) <
1 << (32 - Name::kHashFieldOffset));
__ Add(scratch2, scratch2, Operand(
NameDictionary::GetProbeOffset(i) << Name::kHashShift));
}
__ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
// Scale the index by multiplying by the element size.
STATIC_ASSERT(NameDictionary::kEntrySize == 3);
__ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
UseScratchRegisterScope temps(masm);
Register scratch3 = temps.AcquireX();
__ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
__ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
__ Cmp(name, scratch3);
__ B(eq, done);
}
// The inlined probes didn't find the entry.
// Call the complete stub to scan the whole dictionary.
CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6);
spill_list.Combine(lr);
spill_list.Remove(scratch1);
spill_list.Remove(scratch2);
__ PushCPURegList(spill_list);
if (name.is(x0)) {
DCHECK(!elements.is(x1));
__ Mov(x1, name);
__ Mov(x0, elements);
} else {
__ Mov(x0, elements);
__ Mov(x1, name);
}
Label not_found;
NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
__ CallStub(&stub);
__ Cbz(x0, &not_found);
__ Mov(scratch2, x2); // Move entry index into scratch2.
__ PopCPURegList(spill_list);
__ B(done);
__ Bind(&not_found);
__ PopCPURegList(spill_list);
__ B(miss);
}
void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Label* miss,
Label* done,
@ -3875,127 +3554,6 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
GenerateCase(masm, FAST_ELEMENTS);
}
void FastNewObjectStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : target
// -- x3 : new target
// -- cp : context
// -- lr : return address
// -----------------------------------
__ AssertFunction(x1);
__ AssertReceiver(x3);
// Verify that the new target is a JSFunction.
Label new_object;
__ JumpIfNotObjectType(x3, x2, x2, JS_FUNCTION_TYPE, &new_object);
// Load the initial map and verify that it's in fact a map.
__ Ldr(x2, FieldMemOperand(x3, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(x2, &new_object);
__ JumpIfNotObjectType(x2, x0, x0, MAP_TYPE, &new_object);
// Fall back to runtime if the target differs from the new target's
// initial map constructor.
__ Ldr(x0, FieldMemOperand(x2, Map::kConstructorOrBackPointerOffset));
__ CompareAndBranch(x0, x1, ne, &new_object);
// Allocate the JSObject on the heap.
Label allocate, done_allocate;
__ Ldrb(x4, FieldMemOperand(x2, Map::kInstanceSizeOffset));
__ Allocate(x4, x0, x5, x6, &allocate, SIZE_IN_WORDS);
__ Bind(&done_allocate);
// Initialize the JSObject fields.
STATIC_ASSERT(JSObject::kMapOffset == 0 * kPointerSize);
__ Str(x2, FieldMemOperand(x0, JSObject::kMapOffset));
__ LoadRoot(x3, Heap::kEmptyFixedArrayRootIndex);
STATIC_ASSERT(JSObject::kPropertiesOffset == 1 * kPointerSize);
STATIC_ASSERT(JSObject::kElementsOffset == 2 * kPointerSize);
__ Str(x3, FieldMemOperand(x0, JSObject::kPropertiesOffset));
__ Str(x3, FieldMemOperand(x0, JSObject::kElementsOffset));
STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
__ Add(x1, x0, Operand(JSObject::kHeaderSize - kHeapObjectTag));
// ----------- S t a t e -------------
// -- x0 : result (tagged)
// -- x1 : result fields (untagged)
// -- x5 : result end (untagged)
// -- x2 : initial map
// -- cp : context
// -- lr : return address
// -----------------------------------
// Perform in-object slack tracking if requested.
Label slack_tracking;
STATIC_ASSERT(Map::kNoSlackTracking == 0);
__ LoadRoot(x6, Heap::kUndefinedValueRootIndex);
__ Ldr(w3, FieldMemOperand(x2, Map::kBitField3Offset));
__ TestAndBranchIfAnySet(w3, Map::ConstructionCounter::kMask,
&slack_tracking);
{
// Initialize all in-object fields with undefined.
__ InitializeFieldsWithFiller(x1, x5, x6);
__ Ret();
}
__ Bind(&slack_tracking);
{
// Decrease generous allocation count.
STATIC_ASSERT(Map::ConstructionCounter::kNext == 32);
__ Sub(w3, w3, 1 << Map::ConstructionCounter::kShift);
__ Str(w3, FieldMemOperand(x2, Map::kBitField3Offset));
// Initialize the in-object fields with undefined.
__ Ldrb(x4, FieldMemOperand(x2, Map::kUnusedPropertyFieldsOffset));
__ Sub(x4, x5, Operand(x4, LSL, kPointerSizeLog2));
__ InitializeFieldsWithFiller(x1, x4, x6);
// Initialize the remaining (reserved) fields with one pointer filler map.
__ LoadRoot(x6, Heap::kOnePointerFillerMapRootIndex);
__ InitializeFieldsWithFiller(x1, x5, x6);
// Check if we can finalize the instance size.
Label finalize;
STATIC_ASSERT(Map::kSlackTrackingCounterEnd == 1);
__ TestAndBranchIfAllClear(w3, Map::ConstructionCounter::kMask, &finalize);
__ Ret();
// Finalize the instance size.
__ Bind(&finalize);
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(x0, x2);
__ CallRuntime(Runtime::kFinalizeInstanceSize);
__ Pop(x0);
}
__ Ret();
}
// Fall back to %AllocateInNewSpace.
__ Bind(&allocate);
{
FrameScope scope(masm, StackFrame::INTERNAL);
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize == 1);
__ Mov(x4,
Operand(x4, LSL, kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize));
__ Push(x2, x4);
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ Pop(x2);
}
__ Ldrb(x5, FieldMemOperand(x2, Map::kInstanceSizeOffset));
__ Add(x5, x0, Operand(x5, LSL, kPointerSizeLog2));
STATIC_ASSERT(kHeapObjectTag == 1);
__ Sub(x5, x5, kHeapObjectTag); // Subtract the tag from end.
__ B(&done_allocate);
// Fall back to %NewObject.
__ Bind(&new_object);
__ Push(x1, x3);
__ TailCallRuntime(Runtime::kNewObject);
}
void FastNewRestParameterStub::Generate(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- x1 : function

View File

@ -355,14 +355,6 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Handle<Name> name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register scratch1,
Register scratch2);
bool SometimesSetsUpAFrame() override { return false; }
private:

View File

@ -40,272 +40,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* allocation_memento_found) {
ASM_LOCATION(
"ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
DCHECK(!AreAliased(receiver, key, value, target_map));
if (mode == TRACK_ALLOCATION_SITE) {
DCHECK(allocation_memento_found != NULL);
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
allocation_memento_found);
}
// Set transitioned map.
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver,
HeapObject::kMapOffset,
target_map,
x10,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
Label gc_required, only_change_map;
Register elements = x4;
Register length = x5;
Register array_size = x6;
Register array = x7;
Register scratch = x6;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, length, array_size, array));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedDoubleArray.
__ Lsl(array_size, length, kDoubleSizeLog2);
__ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
__ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
// Register array is non-tagged heap object.
// Set the destination FixedDoubleArray's length and map.
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
__ Move(x10, array);
__ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, x10, scratch,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
__ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));
FPRegister nan_d = d1;
__ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));
Label entry, done;
__ B(&entry);
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ B(&done);
// Call into runtime if GC is required.
__ Bind(&gc_required);
__ Pop(lr);
__ B(fail);
// Iterate over the array, copying and converting smis to doubles. If an
// element is non-smi, write a hole to the destination.
{
Label loop;
__ Bind(&loop);
__ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
__ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
__ Tst(x13, kSmiTagMask);
__ Fcsel(d0, d0, nan_d, eq);
__ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));
__ Bind(&entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &loop);
}
__ Pop(lr);
__ Bind(&done);
}
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register target_map,
AllocationSiteMode mode,
Label* fail) {
ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
Register elements = x4;
Register array_size = x6;
Register array = x7;
Register length = x5;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map,
elements, array_size, array, length));
if (mode == TRACK_ALLOCATION_SITE) {
__ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
}
// Check for empty arrays, which only require a map transition and no changes
// to the backing store.
Label only_change_map;
__ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
__ Push(lr);
// TODO(all): These registers may not need to be pushed. Examine
// RecordWriteStub and check whether pushing them is needed.
__ Push(target_map, receiver, key, value);
__ Ldrsw(length, UntagSmiFieldMemOperand(elements,
FixedArray::kLengthOffset));
// Allocate new FixedArray.
Label gc_required;
__ Mov(array_size, FixedDoubleArray::kHeaderSize);
__ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
__ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
// Set destination FixedDoubleArray's length and map.
Register map_root = array_size;
__ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
__ SmiTag(x11, length);
__ Str(x11, FieldMemOperand(array, FixedDoubleArray::kLengthOffset));
__ Str(map_root, FieldMemOperand(array, HeapObject::kMapOffset));
// Prepare for conversion loop.
Register src_elements = x10;
Register dst_elements = x11;
Register dst_end = x12;
Register the_hole = x14;
__ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
__ Add(src_elements, elements,
FixedDoubleArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
__ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
// so pessimistically fill it with holes now.
Label initialization_loop, initialization_loop_entry;
__ B(&initialization_loop_entry);
__ bind(&initialization_loop);
__ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
__ bind(&initialization_loop_entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &initialization_loop);
__ Add(dst_elements, array, FixedArray::kHeaderSize - kHeapObjectTag);
Register heap_num_map = x15;
__ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);
Label entry;
__ B(&entry);
// Call into runtime if GC is required.
__ Bind(&gc_required);
__ Pop(value, key, receiver, target_map);
__ Pop(lr);
__ B(fail);
{
Label loop, convert_hole;
__ Bind(&loop);
__ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
__ Cmp(x13, kHoleNanInt64);
__ B(eq, &convert_hole);
// Non-hole double, copy value into a heap number.
Register heap_num = length;
Register scratch = array_size;
Register scratch2 = elements;
__ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
x13, heap_num_map);
__ Mov(x13, dst_elements);
__ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
__ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
__ B(&entry);
// Replace the-hole NaN with the-hole pointer.
__ Bind(&convert_hole);
__ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));
__ Bind(&entry);
__ Cmp(dst_elements, dst_end);
__ B(lt, &loop);
}
__ Pop(value, key, receiver, target_map);
// Replace receiver's backing store with newly created and filled FixedArray.
__ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ Pop(lr);
__ Bind(&only_change_map);
__ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
USE(isolate);
DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
@ -338,30 +72,22 @@ bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
return MacroAssembler::IsYoungSequence(isolate, sequence);
}
void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(isolate, sequence)) {
*age = kNoAgeCodeAge;
*parity = NO_MARKING_PARITY;
} else {
byte* target = sequence + kCodeAgeStubEntryOffset;
Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
GetCodeAgeAndParity(stub, age, parity);
}
}
Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;
byte* target = sequence + kCodeAgeStubEntryOffset;
Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
return GetAgeOfCodeAgeStub(stub);
}
void Code::PatchPlatformCodeAge(Isolate* isolate,
byte* sequence,
Code::Age age,
MarkingParity parity) {
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
Code::Age age) {
PatchingAssembler patcher(isolate, sequence,
kNoCodeAgeSequenceLength / kInstructionSize);
if (age == kNoAgeCodeAge) {
MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
} else {
Code * stub = GetCodeAgeStub(isolate, age, parity);
Code* stub = GetCodeAgeStub(isolate, age);
MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
}
}


@ -64,14 +64,10 @@ const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; }
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x2: function info
Register registers[] = {x2};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewObjectDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x1, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void FastNewClosureDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function info
// x2: feedback vector
// x3: slot
Register registers[] = {x1, x2, x3};
data->InitializePlatformSpecific(arraysize(registers), registers);
}


@ -2203,65 +2203,6 @@ void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
Bind(&done);
}
void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure, SmiCheckType smi_check) {
if (smi_check == DO_SMI_CHECK) {
JumpIfEitherSmi(first, second, failure);
} else if (emit_debug_code()) {
DCHECK(smi_check == DONT_DO_SMI_CHECK);
Label not_smi;
JumpIfEitherSmi(first, second, NULL, &not_smi);
// At least one input is a smi, but the flags indicated a smi check wasn't
// needed.
Abort(kUnexpectedSmi);
Bind(&not_smi);
}
// Test that both first and second are sequential one-byte strings.
Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
scratch2, failure);
}
void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
DCHECK(!AreAliased(scratch1, second));
DCHECK(!AreAliased(scratch1, scratch2));
const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
And(scratch1, first, kFlatOneByteStringMask);
And(scratch2, second, kFlatOneByteStringMask);
Cmp(scratch1, kFlatOneByteStringTag);
Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
B(ne, failure);
}
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
Register scratch,
Label* failure) {
const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
And(scratch, type, kFlatOneByteStringMask);
Cmp(scratch, kFlatOneByteStringTag);
B(ne, failure);
}
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
@ -2425,17 +2366,15 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Bind(&regular_invoke);
}
void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_flooding;
ExternalReference last_step_action =
ExternalReference::debug_last_step_action_address(isolate());
STATIC_ASSERT(StepFrame > StepIn);
Mov(x4, Operand(last_step_action));
void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual) {
Label skip_hook;
ExternalReference debug_hook_active =
ExternalReference::debug_hook_on_function_call_address(isolate());
Mov(x4, Operand(debug_hook_active));
Ldrsb(x4, MemOperand(x4));
CompareAndBranch(x4, Operand(StepIn), lt, &skip_flooding);
CompareAndBranch(x4, Operand(0), eq, &skip_hook);
{
FrameScope frame(this,
has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
@ -2452,7 +2391,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
}
Push(fun);
Push(fun);
CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
CallRuntime(Runtime::kDebugOnFunctionCall);
Pop(fun);
if (new_target.is_valid()) {
Pop(new_target);
@ -2466,7 +2405,7 @@ void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target,
SmiUntag(expected.reg());
}
}
bind(&skip_flooding);
bind(&skip_hook);
}
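
The CheckDebugHook sequence above replaces the old step-action comparison with a single byte flag: the code loads debug_hook_on_function_call and only takes the slow runtime path when it is non-zero. A rough standalone C++ analogue of that fast-path check, with hypothetical names (the real flag lives on the isolate and the slow path is Runtime::kDebugOnFunctionCall):

#include <cstdint>
#include <cstdio>

static int8_t debug_hook_on_function_call = 0;  // Toggled by the debugger.

static void DebugOnFunctionCall() { std::puts("debugger notified"); }

static void Invoke(void (*fn)()) {
  // Fast path: one byte load and compare, as in Ldrsb + CompareAndBranch.
  if (debug_hook_on_function_call != 0) DebugOnFunctionCall();
  fn();
}

int main() {
  Invoke([] { std::puts("call 1"); });  // Hook inactive: no debugger work.
  debug_hook_on_function_call = 1;
  Invoke([] { std::puts("call 2"); });  // Hook active: debugger notified first.
}
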
@ -2480,7 +2419,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
DCHECK(function.is(x1));
DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
FloodFunctionIfStepping(function, new_target, expected, actual);
if (call_wrapper.NeedsDebugHookCheck()) {
CheckDebugHook(function, new_target, expected, actual);
}
// Clear the new.target register if not given.
if (!new_target.is_valid()) {
@ -2709,12 +2650,12 @@ void MacroAssembler::EnterFrame(StackFrame::Type type,
void MacroAssembler::EnterFrame(StackFrame::Type type) {
DCHECK(jssp.Is(StackPointer()));
UseScratchRegisterScope temps(this);
Register type_reg = temps.AcquireX();
Register code_reg = temps.AcquireX();
if (type == StackFrame::INTERNAL) {
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Push(lr, fp);
Push(type_reg);
@ -2725,7 +2666,18 @@ void MacroAssembler::EnterFrame(StackFrame::Type type) {
// jssp[3] : fp
// jssp[1] : type
// jssp[0] : [code object]
} else if (type == StackFrame::WASM_COMPILED) {
DCHECK(csp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Push(xzr, lr);
Push(fp, type_reg);
Add(fp, csp, TypedFrameConstants::kFixedFrameSizeFromFp);
// csp[3] for alignment
// csp[2] : lr
// csp[1] : fp
// csp[0] : type
} else {
DCHECK(jssp.Is(StackPointer()));
Mov(type_reg, Smi::FromInt(type));
Push(lr, fp);
Push(type_reg);
@ -3208,114 +3160,6 @@ void MacroAssembler::FastAllocate(Register object_size, Register result,
ObjectTag(result, result);
}
void MacroAssembler::AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
Add(scratch1, length, length); // Length in bytes, not chars.
Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
Bic(scratch1, scratch1, kObjectAlignmentMask);
// Allocate two-byte string in new space.
Allocate(scratch1, result, scratch2, scratch3, gc_required,
NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result,
length,
Heap::kStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3,
Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
STATIC_ASSERT(kCharSize == 1);
Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
Bic(scratch1, scratch1, kObjectAlignmentMask);
// Allocate one-byte string in new space.
Allocate(scratch1, result, scratch2, scratch3, gc_required,
NO_ALLOCATION_FLAGS);
// Set the map, length and hash field.
InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
scratch1, scratch2);
}
void MacroAssembler::AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kConsStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
scratch1, scratch2);
}
void MacroAssembler::AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result,
length,
Heap::kSlicedStringMapRootIndex,
scratch1,
scratch2);
}
void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
scratch1, scratch2);
}
// Allocates a heap number or jumps to the need_gc label if the young space
// is full and a scavenge is needed.
void MacroAssembler::AllocateHeapNumber(Register result,
@ -3664,59 +3508,6 @@ void MacroAssembler::TestAndSplit(const Register& reg,
}
}
void MacroAssembler::CheckFastObjectElements(Register map,
Register scratch,
Label* fail) {
STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
STATIC_ASSERT(FAST_ELEMENTS == 2);
STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
// If cond==ls, set cond=hi, otherwise compare.
Ccmp(scratch,
Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
B(hi, fail);
}
// Note: The ARM version of this clobbers elements_reg, but this version does
// not. Some uses of this in ARM64 assume that elements_reg will be preserved.
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register elements_reg,
Register scratch1,
FPRegister fpscratch1,
Label* fail,
int elements_offset) {
DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label store_num;
// Speculatively convert the smi to a double - all smis can be exactly
// represented as a double.
SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
// If value_reg is a smi, we're done.
JumpIfSmi(value_reg, &store_num);
// Ensure that the object is a heap number.
JumpIfNotHeapNumber(value_reg, fail);
Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
// Canonicalize NaNs.
CanonicalizeNaN(fpscratch1);
// Store the result.
Bind(&store_num);
Add(scratch1, elements_reg,
Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
Str(fpscratch1,
FieldMemOperand(scratch1,
FixedDoubleArray::kHeaderSize - elements_offset));
}
bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
return has_frame_ || !stub->SometimesSetsUpAFrame();
}
@ -4276,39 +4067,6 @@ void MacroAssembler::JumpIfBlack(Register object,
HasColor(object, scratch0, scratch1, on_black, 1, 1);  // kBlackBitPattern.
}
void MacroAssembler::JumpIfDictionaryInPrototypeChain(
Register object,
Register scratch0,
Register scratch1,
Label* found) {
DCHECK(!AreAliased(object, scratch0, scratch1));
Register current = scratch0;
Label loop_again, end;
// Scratch contains elements pointer.
Mov(current, object);
Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareAndBranch(current, Heap::kNullValueRootIndex, eq, &end);
// Loop based on the map going up the prototype chain.
Bind(&loop_again);
Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
CompareInstanceType(current, scratch1, JS_OBJECT_TYPE);
B(lo, found);
Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
DecodeField<Map::ElementsKindBits>(scratch1);
CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareAndBranch(current, Heap::kNullValueRootIndex, ne, &loop_again);
Bind(&end);
}
void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
Register shift_scratch, Register load_scratch,
Register length_scratch,
@ -4471,30 +4229,6 @@ void MacroAssembler::Abort(BailoutReason reason) {
TmpList()->set_list(old_tmp_list);
}
void MacroAssembler::LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch1,
Register scratch2,
Label* no_map_match) {
DCHECK(IsFastElementsKind(expected_kind));
DCHECK(IsFastElementsKind(transitioned_kind));
// Check that the function's map is the same as the expected cached map.
Ldr(scratch1, NativeContextMemOperand());
Ldr(scratch2,
ContextMemOperand(scratch1, Context::ArrayMapIndex(expected_kind)));
Cmp(map_in_out, scratch2);
B(ne, no_map_match);
// Use the transitioned cached map.
Ldr(map_in_out,
ContextMemOperand(scratch1, Context::ArrayMapIndex(transitioned_kind)));
}
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
Ldr(dst, NativeContextMemOperand());
Ldr(dst, ContextMemOperand(dst, index));


@ -1101,24 +1101,6 @@ class MacroAssembler : public Assembler {
// ---- String Utilities ----
// Jump to label if either object is not a sequential one-byte string.
// Optionally perform a smi check on the objects first.
void JumpIfEitherIsNotSequentialOneByteStrings(
Register first, Register second, Register scratch1, Register scratch2,
Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);
// Check if instance type is sequential one-byte string and jump to label if
// it is not.
void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
Label* failure);
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
Register first_object_instance_type, Register second_object_instance_type,
Register scratch1, Register scratch2, Label* failure);
// Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialOneByte(
@ -1227,9 +1209,11 @@ class MacroAssembler : public Assembler {
InvokeFlag flag,
bool* definitely_mismatches,
const CallWrapper& call_wrapper);
void FloodFunctionIfStepping(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
// On function call, call into the debugger if necessary.
void CheckDebugHook(Register fun, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual);
void InvokeFunctionCode(Register function, Register new_target,
const ParameterCount& expected,
const ParameterCount& actual, InvokeFlag flag,
@ -1360,32 +1344,6 @@ class MacroAssembler : public Assembler {
void FastAllocate(int object_size, Register result, Register scratch1,
Register scratch2, AllocationFlags flags);
void AllocateTwoByteString(Register result,
Register length,
Register scratch1,
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateOneByteString(Register result, Register length,
Register scratch1, Register scratch2,
Register scratch3, Label* gc_required);
void AllocateTwoByteConsString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateOneByteConsString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
void AllocateTwoByteSlicedString(Register result,
Register length,
Register scratch1,
Register scratch2,
Label* gc_required);
void AllocateOneByteSlicedString(Register result, Register length,
Register scratch1, Register scratch2,
Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed.
// All registers are clobbered.
@ -1566,21 +1524,6 @@ class MacroAssembler : public Assembler {
Label* if_any_set,
Label* fall_through);
// Check if a map for a JSObject indicates that the object can have both smi
// and HeapObject elements. Jump to the specified label if it does not.
void CheckFastObjectElements(Register map, Register scratch, Label* fail);
// Check to see if number can be stored as a double in FastDoubleElements.
// If it can, store it at the index specified by key_reg in the array,
// otherwise jump to fail.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
Register elements_reg,
Register scratch1,
FPRegister fpscratch1,
Label* fail,
int elements_offset = 0);
// ---------------------------------------------------------------------------
// Inline caching support.
@ -1624,17 +1567,6 @@ class MacroAssembler : public Assembler {
Register scratch2,
Label* no_memento_found);
void JumpIfJSArrayHasAllocationMemento(Register receiver,
Register scratch1,
Register scratch2,
Label* memento_found) {
Label no_memento_found;
TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
&no_memento_found);
B(eq, memento_found);
Bind(&no_memento_found);
}
// The stack pointer has to switch between csp and jssp when setting up and
// destroying the exit frame. Hence preserving/restoring the registers is
// slightly more complicated than simple push/pop operations.
@ -1902,18 +1834,6 @@ class MacroAssembler : public Assembler {
// Print a message to stderr and abort execution.
void Abort(BailoutReason reason);
// Conditionally load the cached Array transitioned map of type
// transitioned_kind from the native context if the map in register
// map_in_out is the cached Array map in the native context of
// expected_kind.
void LoadTransitionedArrayMapConditional(
ElementsKind expected_kind,
ElementsKind transitioned_kind,
Register map_in_out,
Register scratch1,
Register scratch2,
Label* no_map_match);
void LoadNativeContextSlot(int index, Register dst);
// Load the initial map from the global function. The registers function and
@ -2002,10 +1922,6 @@ class MacroAssembler : public Assembler {
// sequence is a code age sequence (emitted by EmitCodeAgeSequence).
static bool IsYoungSequence(Isolate* isolate, byte* sequence);
// Jumps to found label if a prototype map has dictionary elements.
void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
Register scratch1, Label* found);
// Perform necessary maintenance operations before a push or after a pop.
//
// Note that size is specified in bytes.


@ -4,6 +4,7 @@ set noparent
ahaas@chromium.org
bradnelson@chromium.org
clemensh@chromium.org
jpp@chromium.org
mtrofin@chromium.org
rossberg@chromium.org


@ -9,6 +9,7 @@
#include "src/asmjs/asm-typer.h" #include "src/asmjs/asm-typer.h"
#include "src/asmjs/asm-wasm-builder.h" #include "src/asmjs/asm-wasm-builder.h"
#include "src/assert-scope.h" #include "src/assert-scope.h"
#include "src/compilation-info.h"
#include "src/execution.h" #include "src/execution.h"
#include "src/factory.h" #include "src/factory.h"
#include "src/handles.h" #include "src/handles.h"
@ -31,6 +32,15 @@ namespace v8 {
namespace internal {
namespace {
enum WasmDataEntries {
kWasmDataCompiledModule,
kWasmDataForeignGlobals,
kWasmDataUsesArray,
kWasmDataScript,
kWasmDataScriptPosition,
kWasmDataEntryCount,
};
Handle<i::Object> StdlibMathMember(i::Isolate* isolate,
Handle<JSReceiver> stdlib,
Handle<Name> name) {
@ -151,29 +161,38 @@ bool IsStdlibMemberValid(i::Isolate* isolate, Handle<JSReceiver> stdlib,
} // namespace
MaybeHandle<FixedArray> AsmJs::ConvertAsmToWasm(ParseInfo* info) {
ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
wasm::AsmTyper typer(info->isolate(), info->zone(), *(info->script()),
info->literal());
if (!typer.Validate()) {
DCHECK(!info->isolate()->has_pending_exception());
PrintF("Validation of asm.js module failed: %s", typer.error_message());
return MaybeHandle<FixedArray>();
}
v8::internal::wasm::AsmWasmBuilder builder(info->isolate(), info->zone(),
info->literal(), &typer);
i::Handle<i::FixedArray> foreign_globals;
auto asm_wasm_result = builder.Run(&foreign_globals);
wasm::ZoneBuffer* module = asm_wasm_result.module_bytes;
wasm::ZoneBuffer* asm_offsets = asm_wasm_result.asm_offset_table;
i::MaybeHandle<i::JSObject> compiled = wasm::CreateModuleObjectFromBytes(
info->isolate(), module->begin(), module->end(), &thrower,
internal::wasm::kAsmJsOrigin, info->script(), asm_offsets->begin(),
asm_offsets->end());
DCHECK(!compiled.is_null());
wasm::AsmTyper::StdlibSet uses = typer.StdlibUses();
MaybeHandle<FixedArray> AsmJs::CompileAsmViaWasm(CompilationInfo* info) {
ErrorThrower thrower(info->isolate(), "Asm.js -> WebAssembly conversion");
base::ElapsedTimer asm_wasm_timer;
asm_wasm_timer.Start();
wasm::AsmWasmBuilder builder(info);
Handle<FixedArray> foreign_globals;
auto asm_wasm_result = builder.Run(&foreign_globals);
if (!asm_wasm_result.success) {
DCHECK(!info->isolate()->has_pending_exception());
if (!FLAG_suppress_asm_messages) {
MessageHandler::ReportMessage(info->isolate(),
builder.typer()->message_location(),
builder.typer()->error_message());
}
return MaybeHandle<FixedArray>();
}
double asm_wasm_time = asm_wasm_timer.Elapsed().InMillisecondsF();
wasm::ZoneBuffer* module = asm_wasm_result.module_bytes;
wasm::ZoneBuffer* asm_offsets = asm_wasm_result.asm_offset_table;
Vector<const byte> asm_offsets_vec(asm_offsets->begin(),
static_cast<int>(asm_offsets->size()));
base::ElapsedTimer compile_timer;
compile_timer.Start();
MaybeHandle<JSObject> compiled = wasm::CreateModuleObjectFromBytes(
info->isolate(), module->begin(), module->end(), &thrower,
internal::wasm::kAsmJsOrigin, info->script(), asm_offsets_vec);
DCHECK(!compiled.is_null());
double compile_time = compile_timer.Elapsed().InMillisecondsF();
wasm::AsmTyper::StdlibSet uses = builder.typer()->StdlibUses();
@ -181,16 +200,45 @@ MaybeHandle<FixedArray> AsmJs::ConvertAsmToWasm(ParseInfo* info) {
uses_array->set(count++, Smi::FromInt(i));
}
Handle<FixedArray> result = info->isolate()->factory()->NewFixedArray(3);
result->set(0, *compiled.ToHandleChecked());
result->set(1, *foreign_globals);
result->set(2, *uses_array);
Handle<FixedArray> result =
info->isolate()->factory()->NewFixedArray(kWasmDataEntryCount);
result->set(kWasmDataCompiledModule, *compiled.ToHandleChecked());
result->set(kWasmDataForeignGlobals, *foreign_globals);
result->set(kWasmDataUsesArray, *uses_array);
result->set(kWasmDataScript, *info->script());
result->set(kWasmDataScriptPosition,
Smi::FromInt(info->literal()->position()));
MessageLocation location(info->script(), info->literal()->position(),
info->literal()->position());
char text[100];
int length;
if (FLAG_predictable) {
length = base::OS::SNPrintF(text, arraysize(text), "success");
} else {
length =
base::OS::SNPrintF(text, arraysize(text),
"success, asm->wasm: %0.3f ms, compile: %0.3f ms",
asm_wasm_time, compile_time);
}
DCHECK_NE(-1, length);
USE(length);
Handle<String> stext(info->isolate()->factory()->InternalizeUtf8String(text));
Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
info->isolate(), MessageTemplate::kAsmJsCompiled, &location, stext,
Handle<JSArray>::null());
message->set_error_level(v8::Isolate::kMessageInfo);
if (!FLAG_suppress_asm_messages && FLAG_trace_asm_time) {
MessageHandler::ReportMessage(info->isolate(), &location, message);
}
return result;
}
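
The compile path above now measures each phase with base::ElapsedTimer and reports the times through the message handler, emitting a fixed "success" string under FLAG_predictable so test expectations stay stable. A standalone sketch of that pattern, with std::chrono standing in for base::ElapsedTimer:

#include <chrono>
#include <cstdio>

int main() {
  const bool predictable = false;  // Mirrors FLAG_predictable.
  auto start = std::chrono::steady_clock::now();
  // ... compile work would happen here ...
  std::chrono::duration<double, std::milli> elapsed =
      std::chrono::steady_clock::now() - start;
  char text[100];
  if (predictable) {
    std::snprintf(text, sizeof(text), "success");
  } else {
    std::snprintf(text, sizeof(text), "success, compile: %0.3f ms",
                  elapsed.count());
  }
  std::puts(text);  // V8 routes this through MessageHandler instead.
}
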
bool AsmJs::IsStdlibValid(i::Isolate* isolate, Handle<FixedArray> wasm_data,
Handle<JSReceiver> stdlib) {
i::Handle<i::FixedArray> uses(i::FixedArray::cast(wasm_data->get(2)));
i::Handle<i::FixedArray> uses(
i::FixedArray::cast(wasm_data->get(kWasmDataUsesArray)));
for (int i = 0; i < uses->length(); ++i) {
if (!IsStdlibMemberValid(isolate, stdlib,
uses->GetValueChecked<i::Object>(isolate, i))) {
@ -204,14 +252,27 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
Handle<FixedArray> wasm_data,
Handle<JSArrayBuffer> memory,
Handle<JSReceiver> foreign) {
i::Handle<i::JSObject> module(i::JSObject::cast(wasm_data->get(0)));
i::Handle<i::FixedArray> foreign_globals(
i::FixedArray::cast(wasm_data->get(1)));
base::ElapsedTimer instantiate_timer;
instantiate_timer.Start();
i::Handle<i::WasmModuleObject> module(
i::WasmModuleObject::cast(wasm_data->get(kWasmDataCompiledModule)));
i::Handle<i::FixedArray> foreign_globals(
i::FixedArray::cast(wasm_data->get(kWasmDataForeignGlobals)));
ErrorThrower thrower(isolate, "Asm.js -> WebAssembly instantiation");
// Create the ffi object for foreign functions {"": foreign}.
Handle<JSObject> ffi_object;
if (!foreign.is_null()) {
Handle<JSFunction> object_function = Handle<JSFunction>(
isolate->native_context()->object_function(), isolate);
ffi_object = isolate->factory()->NewJSObject(object_function);
JSObject::AddProperty(ffi_object, isolate->factory()->empty_string(),
foreign, NONE);
}
i::MaybeHandle<i::JSObject> maybe_module_object =
i::wasm::WasmModule::Instantiate(isolate, &thrower, module, foreign,
memory);
i::MaybeHandle<i::JSObject> maybe_module_object =
i::wasm::WasmModule::Instantiate(isolate, &thrower, module, ffi_object,
memory);
if (maybe_module_object.is_null()) {
return MaybeHandle<Object>();
@ -258,6 +319,32 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(i::Isolate* isolate,
!single_function.ToHandleChecked()->IsUndefined(isolate)) {
return single_function;
}
i::Handle<i::Script> script(i::Script::cast(wasm_data->get(kWasmDataScript)));
int32_t position = 0;
if (!wasm_data->get(kWasmDataScriptPosition)->ToInt32(&position)) {
UNREACHABLE();
}
MessageLocation location(script, position, position);
char text[50];
int length;
if (FLAG_predictable) {
length = base::OS::SNPrintF(text, arraysize(text), "success");
} else {
length = base::OS::SNPrintF(text, arraysize(text), "success, %0.3f ms",
instantiate_timer.Elapsed().InMillisecondsF());
}
DCHECK_NE(-1, length);
USE(length);
Handle<String> stext(isolate->factory()->InternalizeUtf8String(text));
Handle<JSMessageObject> message = MessageHandler::MakeMessageObject(
isolate, MessageTemplate::kAsmJsInstantiated, &location, stext,
Handle<JSArray>::null());
message->set_error_level(v8::Isolate::kMessageInfo);
if (!FLAG_suppress_asm_messages && FLAG_trace_asm_time) {
MessageHandler::ReportMessage(isolate, &location, message);
}
return module_object;
}


@ -10,13 +10,13 @@
namespace v8 {
namespace internal {
class CompilationInfo;
class JSArrayBuffer;
class ParseInfo;
// Interface to compile and instantiate for asmjs.
class AsmJs {
public:
static MaybeHandle<FixedArray> ConvertAsmToWasm(ParseInfo* info);
static MaybeHandle<FixedArray> CompileAsmViaWasm(CompilationInfo* info);
static bool IsStdlibValid(Isolate* isolate, Handle<FixedArray> wasm_data,
Handle<JSReceiver> stdlib);
static MaybeHandle<Object> InstantiateAsmWasm(Isolate* isolate,


@ -9,6 +9,7 @@
#include <memory>
#include <string>
#include "include/v8.h"
#include "src/v8.h" #include "src/v8.h"
#include "src/asmjs/asm-types.h" #include "src/asmjs/asm-types.h"
@ -17,18 +18,33 @@
#include "src/base/bits.h" #include "src/base/bits.h"
#include "src/codegen.h" #include "src/codegen.h"
#include "src/globals.h" #include "src/globals.h"
#include "src/messages.h"
#include "src/utils.h" #include "src/utils.h"
#include "src/vector.h"
#define FAIL(node, msg)                                    \
do {                                                       \
int line = node->position() == kNoSourcePosition           \
? -1                                                       \
: script_->GetLineNumber(node->position());                \
base::OS::SNPrintF(error_message_, sizeof(error_message_), \
"asm: line %d: %s\n", line + 1, msg);                      \
return AsmType::None();                                    \
} while (false)
#define FAIL_LOCATION_RAW(location, msg)                       \
do {                                                           \
Handle<String> message(                                        \
isolate_->factory()->InternalizeOneByteString(msg));           \
error_message_ = MessageHandler::MakeMessageObject(            \
isolate_, MessageTemplate::kAsmJsInvalid, (location), message, \
Handle<JSArray>::null());                                      \
error_message_->set_error_level(v8::Isolate::kMessageWarning); \
message_location_ = *(location);                               \
return AsmType::None();                                        \
} while (false)
#define FAIL_RAW(node, msg) \
do { \
MessageLocation location(script_, node->position(), node->position()); \
FAIL_LOCATION_RAW(&location, msg); \
} while (false)
#define FAIL_LOCATION(location, msg) \
FAIL_LOCATION_RAW(location, STATIC_CHAR_VECTOR(msg))
#define FAIL(node, msg) FAIL_RAW(node, STATIC_CHAR_VECTOR(msg))
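
The reworked FAIL macros above replace the old line-number SNPrintF with a structured message object that carries a source location. Stripped of V8 types, the pattern is simply "record message and location, then return a sentinel type"; a compilable sketch with hypothetical names:

#include <string>

struct Location { int position = -1; };

struct TypeResult {};                        // Stands in for AsmType.
static TypeResult* const kNoType = nullptr;  // Plays the role of AsmType::None().

struct Validator {
  std::string error_message;  // What MakeMessageObject records for V8.
  Location message_location;  // Where validation failed.
  TypeResult* Fail(const Location& loc, const char* msg) {
    error_message = msg;
    message_location = loc;
    return kNoType;  // Callers treat this as "validation failed".
  }
};
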
#define RECURSE(call) \
do { \
if (GetCurrentStackPosition() < stack_limit_) { \
@ -90,6 +106,53 @@ Statement* AsmTyper::FlattenedStatements::Next() {
}
}
// ----------------------------------------------------------------------------
// Implementation of AsmTyper::SourceLayoutTracker
bool AsmTyper::SourceLayoutTracker::IsValid() const {
const Section* kAllSections[] = {&use_asm_, &globals_, &functions_, &tables_,
&exports_};
for (size_t ii = 0; ii < arraysize(kAllSections); ++ii) {
const auto& curr_section = *kAllSections[ii];
for (size_t jj = ii + 1; jj < arraysize(kAllSections); ++jj) {
if (curr_section.IsPrecededBy(*kAllSections[jj])) {
return false;
}
}
}
return true;
}
void AsmTyper::SourceLayoutTracker::Section::AddNewElement(
const AstNode& node) {
const int node_pos = node.position();
if (start_ == kNoSourcePosition) {
start_ = node_pos;
} else {
start_ = std::min(start_, node_pos);
}
if (end_ == kNoSourcePosition) {
end_ = node_pos;
} else {
end_ = std::max(end_, node_pos);
}
}
bool AsmTyper::SourceLayoutTracker::Section::IsPrecededBy(
const Section& other) const {
if (start_ == kNoSourcePosition) {
DCHECK_EQ(end_, kNoSourcePosition);
return false;
}
if (other.start_ == kNoSourcePosition) {
DCHECK_EQ(other.end_, kNoSourcePosition);
return false;
}
DCHECK_LE(start_, end_);
DCHECK_LE(other.start_, other.end_);
return other.start_ <= end_;
}
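
SourceLayoutTracker enforces the required asm.js section order (use asm, globals, functions, tables, exports) by recording each section's minimum and maximum source position and rejecting the module when a later section starts before an earlier one has ended. A compilable sketch of the same check, outside V8's types:

#include <algorithm>
#include <cassert>

struct Section {
  int start = -1, end = -1;  // -1 plays the role of kNoSourcePosition.
  void Add(int pos) {
    start = (start == -1) ? pos : std::min(start, pos);
    end = (end == -1) ? pos : std::max(end, pos);
  }
  // True when 'other' (a later section) begins before this one has ended.
  bool IsPrecededBy(const Section& other) const {
    if (start == -1 || other.start == -1) return false;  // Empty section.
    return other.start <= end;
  }
};

int main() {
  Section globals, functions;
  globals.Add(10);
  globals.Add(20);
  functions.Add(30);
  assert(!globals.IsPrecededBy(functions));  // Valid: functions come after.
  functions.Add(15);                         // A function inside the globals span.
  assert(globals.IsPrecededBy(functions));   // Layout now rejected.
}
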
// ----------------------------------------------------------------------------
// Implementation of AsmTyper::VariableInfo
@ -112,16 +175,16 @@ AsmTyper::VariableInfo* AsmTyper::VariableInfo::Clone(Zone* zone) const {
return new_var_info;
}
void AsmTyper::VariableInfo::FirstForwardUseIs(VariableProxy* var) {
DCHECK(first_forward_use_ == nullptr);
missing_definition_ = true;
first_forward_use_ = var;
}
void AsmTyper::VariableInfo::SetFirstForwardUse(
const MessageLocation& source_location) {
missing_definition_ = true;
source_location_ = source_location;
}
// ----------------------------------------------------------------------------
// Implementation of AsmTyper
AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Handle<Script> script,
FunctionLiteral* root)
: isolate_(isolate),
zone_(zone),
@ -137,9 +200,9 @@ AsmTyper::AsmTyper(Isolate* isolate, Zone* zone, Script* script,
local_scope_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)),
stack_limit_(isolate->stack_guard()->real_climit()),
node_types_(zone_),
fround_type_(AsmType::FroundType(zone_)),
ffi_type_(AsmType::FFIType(zone_)) {
ffi_type_(AsmType::FFIType(zone_)),
function_pointer_tables_(zone_) {
InitializeStdlib();
}
@ -283,6 +346,9 @@ void AsmTyper::InitializeStdlib() {
AsmTyper::VariableInfo* AsmTyper::ImportLookup(Property* import) {
auto* obj = import->obj();
auto* key = import->key()->AsLiteral();
if (key == nullptr) {
return nullptr;
}
ObjectTypeMap* stdlib = &stdlib_types_;
if (auto* obj_as_property = obj->AsProperty()) {
@ -345,7 +411,8 @@ AsmTyper::VariableInfo* AsmTyper::Lookup(Variable* variable) const {
}
void AsmTyper::AddForwardReference(VariableProxy* proxy, VariableInfo* info) {
info->FirstForwardUseIs(proxy);
MessageLocation location(script_, proxy->position(), proxy->position());
info->SetFirstForwardUse(location);
forward_definitions_.push_back(info);
}
@ -390,22 +457,58 @@ bool AsmTyper::AddLocal(Variable* variable, VariableInfo* info) {
void AsmTyper::SetTypeOf(AstNode* node, AsmType* type) {
DCHECK_NE(type, AsmType::None());
DCHECK(node_types_.find(node) == node_types_.end());
node_types_.insert(std::make_pair(node, type));
if (in_function_) {
DCHECK(function_node_types_.find(node) == function_node_types_.end());
function_node_types_.insert(std::make_pair(node, type));
} else {
DCHECK(module_node_types_.find(node) == module_node_types_.end());
module_node_types_.insert(std::make_pair(node, type));
}
}
namespace {
bool IsLiteralDouble(Literal* literal) {
return literal->raw_value()->IsNumber() &&
literal->raw_value()->ContainsDot();
}
bool IsLiteralInt(Literal* literal) {
return literal->raw_value()->IsNumber() &&
!literal->raw_value()->ContainsDot();
}
bool IsLiteralMinus1(Literal* literal) {
return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == -1.0;
}
bool IsLiteral1Dot0(Literal* literal) {
return IsLiteralDouble(literal) && literal->raw_value()->AsNumber() == 1.0;
}
bool IsLiteral0(Literal* literal) {
return IsLiteralInt(literal) && literal->raw_value()->AsNumber() == 0.0;
}
} // namespace
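
These helpers encode asm.js's spelling-based typing: a numeric literal written with a dot (1.0) is a double, one written without (0, 1, -1) is an int. As a toy illustration only, a classifier over raw source text with hypothetical names (the real code inspects the parsed literal value, not the text):

#include <string>

enum class AsmLiteralKind { kInt, kDouble };

// "1.0" -> double; "1" -> int; mirrors IsLiteralDouble/IsLiteralInt above.
AsmLiteralKind ClassifyNumericLiteral(const std::string& raw) {
  return raw.find('.') != std::string::npos ? AsmLiteralKind::kDouble
                                            : AsmLiteralKind::kInt;
}
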
AsmType* AsmTyper::TypeOf(AstNode* node) const {
auto node_type_iter = node_types_.find(node);
if (node_type_iter != node_types_.end()) {
return node_type_iter->second;
}
auto node_type_iter = function_node_types_.find(node);
if (node_type_iter != function_node_types_.end()) {
return node_type_iter->second;
}
node_type_iter = module_node_types_.find(node);
if (node_type_iter != module_node_types_.end()) {
return node_type_iter->second;
}
// Sometimes literal nodes are not added to the node_type_ map simply because
// they are not visited with ValidateExpression().
if (auto* literal = node->AsLiteral()) {
if (literal->raw_value()->ContainsDot()) {
return AsmType::Double();
}
if (IsLiteralDouble(literal)) {
return AsmType::Double();
}
if (!IsLiteralInt(literal)) {
return AsmType::None();
}
uint32_t u;
if (literal->value()->ToUint32(&u)) {
if (u > LargestFixNum) {
@ -433,13 +536,39 @@ AsmTyper::StandardMember AsmTyper::VariableAsStandardMember(Variable* var) {
return member;
}
AsmType* AsmTyper::FailWithMessage(const char* text) {
FAIL_RAW(root_, OneByteVector(text));
}
bool AsmTyper::Validate() {
if (!AsmType::None()->IsExactly(ValidateModule(root_))) {
return true;
}
return false;
}
bool AsmTyper::Validate() {
return ValidateBeforeFunctionsPhase() &&
!AsmType::None()->IsExactly(ValidateModuleFunctions(root_)) &&
ValidateAfterFunctionsPhase();
}
bool AsmTyper::ValidateBeforeFunctionsPhase() {
if (!AsmType::None()->IsExactly(ValidateModuleBeforeFunctionsPhase(root_))) {
return true;
}
return false;
}
bool AsmTyper::ValidateInnerFunction(FunctionDeclaration* fun_decl) {
if (!AsmType::None()->IsExactly(ValidateModuleFunction(fun_decl))) {
return true;
}
return false;
}
bool AsmTyper::ValidateAfterFunctionsPhase() {
if (!AsmType::None()->IsExactly(ValidateModuleAfterFunctionsPhase(root_))) {
return true;
}
return false;
}
void AsmTyper::ClearFunctionNodeTypes() { function_node_types_.clear(); }
namespace { namespace {
bool IsUseAsmDirective(Statement* first_statement) { bool IsUseAsmDirective(Statement* first_statement) {
ExpressionStatement* use_asm = first_statement->AsExpressionStatement(); ExpressionStatement* use_asm = first_statement->AsExpressionStatement();
@ -477,91 +606,12 @@ Assignment* ExtractInitializerExpression(Statement* statement) {
} // namespace
// 6.1 ValidateModule
namespace {
AsmType* AsmTyper::ValidateModuleBeforeFunctionsPhase(FunctionLiteral* fun) {
// SourceLayoutTracker keeps track of the start and end positions of each
// section in the asm.js source. The sections should not overlap, otherwise the
// asm.js source is invalid.
class SourceLayoutTracker {
public:
SourceLayoutTracker() = default;
bool IsValid() const {
const Section* kAllSections[] = {&use_asm_, &globals_, &functions_,
&tables_, &exports_};
for (size_t ii = 0; ii < arraysize(kAllSections); ++ii) {
const auto& curr_section = *kAllSections[ii];
for (size_t jj = ii + 1; jj < arraysize(kAllSections); ++jj) {
if (curr_section.OverlapsWith(*kAllSections[jj])) {
return false;
}
}
}
return true;
}
void AddUseAsm(const AstNode& node) { use_asm_.AddNewElement(node); }
void AddGlobal(const AstNode& node) { globals_.AddNewElement(node); }
void AddFunction(const AstNode& node) { functions_.AddNewElement(node); }
void AddTable(const AstNode& node) { tables_.AddNewElement(node); }
void AddExport(const AstNode& node) { exports_.AddNewElement(node); }
private:
class Section {
public:
Section() = default;
Section(const Section&) = default;
Section& operator=(const Section&) = default;
void AddNewElement(const AstNode& node) {
const int node_pos = node.position();
if (start_ == kNoSourcePosition) {
start_ = node_pos;
} else {
start_ = std::max(start_, node_pos);
}
if (end_ == kNoSourcePosition) {
end_ = node_pos;
} else {
end_ = std::max(end_, node_pos);
}
}
bool OverlapsWith(const Section& other) const {
if (start_ == kNoSourcePosition) {
DCHECK_EQ(end_, kNoSourcePosition);
return false;
}
if (other.start_ == kNoSourcePosition) {
DCHECK_EQ(other.end_, kNoSourcePosition);
return false;
}
return other.start_ < end_ || other.end_ < start_;
}
private:
int start_ = kNoSourcePosition;
int end_ = kNoSourcePosition;
};
Section use_asm_;
Section globals_;
Section functions_;
Section tables_;
Section exports_;
DISALLOW_COPY_AND_ASSIGN(SourceLayoutTracker);
};
} // namespace
AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
SourceLayoutTracker source_layout;
DeclarationScope* scope = fun->scope();
if (!scope->is_function_scope()) FAIL(fun, "Not at function scope.");
if (scope->inner_scope_calls_eval()) {
FAIL(fun, "Invalid asm.js module using eval.");
}
if (!ValidAsmIdentifier(fun->name()))
FAIL(fun, "Invalid asm.js identifier in module name.");
module_name_ = fun->name();
@ -594,7 +644,6 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
}
}
ZoneVector<Assignment*> function_pointer_tables(zone_);
FlattenedStatements iter(zone_, fun->body());
auto* use_asm_directive = iter.Next();
if (use_asm_directive == nullptr) {
@ -616,8 +665,8 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
if (!IsUseAsmDirective(use_asm_directive)) {
FAIL(fun, "Missing \"use asm\".");
}
source_layout.AddUseAsm(*use_asm_directive);
source_layout_.AddUseAsm(*use_asm_directive);
ReturnStatement* module_return = nullptr;
module_return_ = nullptr;
// *VIOLATION* The spec states that globals should be followed by function
// declarations, which should be followed by function pointer tables, followed
@ -627,40 +676,57 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
if (auto* assign = ExtractInitializerExpression(current)) {
if (assign->value()->IsArrayLiteral()) {
// Save function tables for later validation.
function_pointer_tables.push_back(assign);
function_pointer_tables_.push_back(assign);
} else {
RECURSE(ValidateGlobalDeclaration(assign));
source_layout.AddGlobal(*assign);
source_layout_.AddGlobal(*assign);
}
continue;
}
if (auto* current_as_return = current->AsReturnStatement()) {
if (module_return != nullptr) {
if (module_return_ != nullptr) {
FAIL(fun, "Multiple export statements.");
}
module_return = current_as_return;
module_return_ = current_as_return;
source_layout.AddExport(*module_return);
source_layout_.AddExport(*module_return_);
continue;
}
FAIL(current, "Invalid top-level statement in asm.js module.");
}
return AsmType::Int(); // Any type that is not AsmType::None();
}
AsmType* AsmTyper::ValidateModuleFunction(FunctionDeclaration* fun_decl) {
RECURSE(ValidateFunction(fun_decl));
source_layout_.AddFunction(*fun_decl);
return AsmType::Int(); // Any type that is not AsmType::None();
}
AsmType* AsmTyper::ValidateModuleFunctions(FunctionLiteral* fun) {
DeclarationScope* scope = fun->scope();
Declaration::List* decls = scope->declarations();
for (Declaration* decl : *decls) {
if (FunctionDeclaration* fun_decl = decl->AsFunctionDeclaration()) {
RECURSE(ValidateFunction(fun_decl));
RECURSE(ValidateModuleFunction(fun_decl));
source_layout.AddFunction(*fun_decl);
continue;
}
}
for (auto* function_table : function_pointer_tables) {
return AsmType::Int(); // Any type that is not AsmType::None();
}
AsmType* AsmTyper::ValidateModuleAfterFunctionsPhase(FunctionLiteral* fun) {
for (auto* function_table : function_pointer_tables_) {
RECURSE(ValidateFunctionTable(function_table));
source_layout.AddTable(*function_table);
source_layout_.AddTable(*function_table);
}
DeclarationScope* scope = fun->scope();
Declaration::List* decls = scope->declarations();
for (Declaration* decl : *decls) {
if (decl->IsFunctionDeclaration()) { if (decl->IsFunctionDeclaration()) {
continue; continue;
@ -682,20 +748,20 @@ AsmType* AsmTyper::ValidateModule(FunctionLiteral* fun) {
} }
// 6.2 ValidateExport // 6.2 ValidateExport
if (module_return == nullptr) { if (module_return_ == nullptr) {
FAIL(fun, "Missing asm.js module export."); FAIL(fun, "Missing asm.js module export.");
} }
for (auto* forward_def : forward_definitions_) { for (auto* forward_def : forward_definitions_) {
if (forward_def->missing_definition()) { if (forward_def->missing_definition()) {
FAIL(forward_def->first_forward_use(), FAIL_LOCATION(forward_def->source_location(),
"Missing definition for forward declared identifier."); "Missing definition for forward declared identifier.");
} }
} }
RECURSE(ValidateExport(module_return)); RECURSE(ValidateExport(module_return_));
if (!source_layout.IsValid()) { if (!source_layout_.IsValid()) {
FAIL(fun, "Invalid asm.js source code layout."); FAIL(fun, "Invalid asm.js source code layout.");
} }
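
ValidateModule is no longer a monolith: the checks above are split into a before-functions phase, per-function validation, and an after-functions phase, so the asm-wasm builder can interleave validation with code generation and discard per-function type data as it goes. As a rough sketch, the phases compose like this (hypothetical driver; only the method names come from this commit):

#include <vector>

// Hypothetical driver over the phased API; AsmTyper and FunctionDeclaration
// are the V8-internal types declared later in this commit.
bool ValidatePhased(AsmTyper* typer,
                    const std::vector<FunctionDeclaration*>& functions) {
  if (!typer->ValidateBeforeFunctionsPhase()) return false;  // "use asm", globals
  for (FunctionDeclaration* decl : functions) {
    if (!typer->ValidateInnerFunction(decl)) return false;   // one body at a time
    typer->ClearFunctionNodeTypes();  // per-function node types can be dropped
  }
  return typer->ValidateAfterFunctionsPhase();  // tables, export, source layout
}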
@ -714,8 +780,7 @@ bool IsDoubleAnnotation(BinaryOperation* binop) {
return false; return false;
} }
return right_as_literal->raw_value()->ContainsDot() && return IsLiteral1Dot0(right_as_literal);
right_as_literal->raw_value()->AsNumber() == 1.0;
} }
bool IsIntAnnotation(BinaryOperation* binop) { bool IsIntAnnotation(BinaryOperation* binop) {
@ -728,8 +793,7 @@ bool IsIntAnnotation(BinaryOperation* binop) {
return false; return false;
} }
return !right_as_literal->raw_value()->ContainsDot() && return IsLiteral0(right_as_literal);
right_as_literal->raw_value()->AsNumber() == 0.0;
} }
} // namespace } // namespace
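
Both annotation checks now delegate to named literal predicates whose definitions fall outside this hunk. Reconstructed from the code they replace, they presumably look roughly like this (a sketch; the real bodies may also guard against non-number literals):

// Presumed shape of the new predicates, mirroring the replaced checks.
bool IsLiteral1Dot0(Literal* literal) {
  // Double annotation: the literal 1.0, written with a dot.
  return literal->raw_value()->ContainsDot() &&
         literal->raw_value()->AsNumber() == 1.0;
}

bool IsLiteral0(Literal* literal) {
  // Int annotation: the literal 0, written without a dot.
  return !literal->raw_value()->ContainsDot() &&
         literal->raw_value()->AsNumber() == 0.0;
}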
@ -894,6 +958,10 @@ AsmType* AsmTyper::ExportType(VariableProxy* fun_export) {
FAIL(fun_export, "Module export is not an asm.js function."); FAIL(fun_export, "Module export is not an asm.js function.");
} }
if (!fun_export->var()->is_function()) {
FAIL(fun_export, "Module exports must be function declarations.");
}
return type; return type;
} }
@ -915,6 +983,10 @@ AsmType* AsmTyper::ValidateExport(ReturnStatement* exports) {
"Only normal object properties may be used in the export object " "Only normal object properties may be used in the export object "
"literal."); "literal.");
} }
if (!prop->key()->AsLiteral()->IsPropertyName()) {
FAIL(prop->key(),
"Exported functions must have valid identifier names.");
}
auto* export_obj = prop->value()->AsVariableProxy(); auto* export_obj = prop->value()->AsVariableProxy();
if (export_obj == nullptr) { if (export_obj == nullptr) {
@ -1091,6 +1163,7 @@ AsmType* AsmTyper::ValidateFunction(FunctionDeclaration* fun_decl) {
parameter_types.push_back(type); parameter_types.push_back(type);
SetTypeOf(proxy, type); SetTypeOf(proxy, type);
SetTypeOf(expr, type); SetTypeOf(expr, type);
SetTypeOf(expr->value(), type);
} }
if (static_cast<int>(annotated_parameters) != fun->parameter_count()) { if (static_cast<int>(annotated_parameters) != fun->parameter_count()) {
@ -1442,7 +1515,7 @@ bool ExtractInt32CaseLabel(CaseClause* clause, int32_t* lbl) {
return false; return false;
} }
if (lbl_expr->raw_value()->ContainsDot()) { if (!IsLiteralInt(lbl_expr)) {
return false; return false;
} }
@ -1539,8 +1612,7 @@ bool IsInvert(BinaryOperation* binop) {
return false; return false;
} }
return !right_as_literal->raw_value()->ContainsDot() && return IsLiteralMinus1(right_as_literal);
right_as_literal->raw_value()->AsNumber() == -1.0;
} }
bool IsUnaryMinus(BinaryOperation* binop) { bool IsUnaryMinus(BinaryOperation* binop) {
@ -1554,8 +1626,7 @@ bool IsUnaryMinus(BinaryOperation* binop) {
return false; return false;
} }
return !right_as_literal->raw_value()->ContainsDot() && return IsLiteralMinus1(right_as_literal);
right_as_literal->raw_value()->AsNumber() == -1.0;
} }
} // namespace } // namespace
@ -1684,7 +1755,7 @@ AsmType* AsmTyper::ValidateNumericLiteral(Literal* literal) {
return AsmType::Void(); return AsmType::Void();
} }
if (literal->raw_value()->ContainsDot()) { if (IsLiteralDouble(literal)) {
return AsmType::Double(); return AsmType::Double();
} }
@ -1864,7 +1935,7 @@ bool IsIntishLiteralFactor(Expression* expr, int32_t* factor) {
return false; return false;
} }
if (literal->raw_value()->ContainsDot()) { if (!IsLiteralInt(literal)) {
return false; return false;
} }
@ -2204,12 +2275,12 @@ AsmType* AsmTyper::ValidateBitwiseORExpression(BinaryOperation* binop) {
RECURSE(type = ValidateCall(AsmType::Signed(), left_as_call)); RECURSE(type = ValidateCall(AsmType::Signed(), left_as_call));
return type; return type;
} }
-      // TODO(jpp): at this point we know that binop is expr|0. We could sinply
-      //
-      // RECURSE(t = ValidateExpression(left));
-      // FAIL_IF(t->IsNotA(Intish));
-      // return Signed;
+      AsmType* left_type;
+      RECURSE(left_type = ValidateExpression(left));
+      if (!left_type->IsA(AsmType::Intish())) {
+        FAIL(left, "Left side of |0 annotation must be intish.");
+      }
+      return AsmType::Signed();
} }
auto* right = binop->right(); auto* right = binop->right();
@ -2273,7 +2344,7 @@ bool ExtractIndirectCallMask(Expression* expr, uint32_t* value) {
return false; return false;
} }
if (as_literal->raw_value()->ContainsDot()) { if (!IsLiteralInt(as_literal)) {
return false; return false;
} }
@ -2329,6 +2400,9 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
DCHECK(false); DCHECK(false);
FAIL(call, "Redeclared global identifier."); FAIL(call, "Redeclared global identifier.");
} }
if (call->GetCallType() != Call::OTHER_CALL) {
FAIL(call, "Invalid call of existing global function.");
}
SetTypeOf(call_var_proxy, reinterpret_cast<AsmType*>(call_type)); SetTypeOf(call_var_proxy, reinterpret_cast<AsmType*>(call_type));
SetTypeOf(call, return_type); SetTypeOf(call, return_type);
return return_type; return return_type;
@ -2359,6 +2433,10 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
FAIL(call, "Function invocation does not match function type."); FAIL(call, "Function invocation does not match function type.");
} }
if (call->GetCallType() != Call::OTHER_CALL) {
FAIL(call, "Invalid forward call of global function.");
}
SetTypeOf(call_var_proxy, call_var_info->type()); SetTypeOf(call_var_proxy, call_var_info->type());
SetTypeOf(call, return_type); SetTypeOf(call, return_type);
return return_type; return return_type;
@ -2417,6 +2495,9 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
DCHECK(false); DCHECK(false);
FAIL(call, "Redeclared global identifier."); FAIL(call, "Redeclared global identifier.");
} }
if (call->GetCallType() != Call::KEYED_PROPERTY_CALL) {
FAIL(call, "Invalid call of existing function table.");
}
SetTypeOf(call_property, reinterpret_cast<AsmType*>(call_type)); SetTypeOf(call_property, reinterpret_cast<AsmType*>(call_type));
SetTypeOf(call, return_type); SetTypeOf(call, return_type);
return return_type; return return_type;
@ -2441,6 +2522,9 @@ AsmType* AsmTyper::ValidateCall(AsmType* return_type, Call* call) {
"signature."); "signature.");
} }
if (call->GetCallType() != Call::KEYED_PROPERTY_CALL) {
FAIL(call, "Invalid forward call of function table.");
}
SetTypeOf(call_property, previous_type->signature()); SetTypeOf(call_property, previous_type->signature());
SetTypeOf(call, return_type); SetTypeOf(call, return_type);
return return_type; return return_type;
@ -2457,7 +2541,7 @@ bool ExtractHeapAccessShift(Expression* expr, uint32_t* value) {
return false; return false;
} }
if (as_literal->raw_value()->ContainsDot()) { if (!IsLiteralInt(as_literal)) {
return false; return false;
} }
@ -2501,7 +2585,7 @@ AsmType* AsmTyper::ValidateHeapAccess(Property* heap,
SetTypeOf(obj, obj_type); SetTypeOf(obj, obj_type);
if (auto* key_as_literal = heap->key()->AsLiteral()) { if (auto* key_as_literal = heap->key()->AsLiteral()) {
if (key_as_literal->raw_value()->ContainsDot()) { if (!IsLiteralInt(key_as_literal)) {
FAIL(key_as_literal, "Heap access index must be int."); FAIL(key_as_literal, "Heap access index must be int.");
} }
@ -2685,9 +2769,9 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
if (auto* literal = ret_expr->AsLiteral()) { if (auto* literal = ret_expr->AsLiteral()) {
int32_t _; int32_t _;
if (literal->raw_value()->ContainsDot()) { if (IsLiteralDouble(literal)) {
return AsmType::Double(); return AsmType::Double();
} else if (literal->value()->ToInt32(&_)) { } else if (IsLiteralInt(literal) && literal->value()->ToInt32(&_)) {
return AsmType::Signed(); return AsmType::Signed();
} else if (literal->IsUndefinedLiteral()) { } else if (literal->IsUndefinedLiteral()) {
// *VIOLATION* The parser changes // *VIOLATION* The parser changes
@ -2728,13 +2812,15 @@ AsmType* AsmTyper::ReturnTypeAnnotations(ReturnStatement* statement) {
AsmType* AsmTyper::VariableTypeAnnotations( AsmType* AsmTyper::VariableTypeAnnotations(
Expression* initializer, VariableInfo::Mutability mutability_type) { Expression* initializer, VariableInfo::Mutability mutability_type) {
if (auto* literal = initializer->AsLiteral()) { if (auto* literal = initializer->AsLiteral()) {
if (literal->raw_value()->ContainsDot()) { if (IsLiteralDouble(literal)) {
SetTypeOf(initializer, AsmType::Double()); SetTypeOf(initializer, AsmType::Double());
return AsmType::Double(); return AsmType::Double();
} }
if (!IsLiteralInt(literal)) {
FAIL(initializer, "Invalid type annotation - forbidden literal.");
}
int32_t i32; int32_t i32;
uint32_t u32; uint32_t u32;
AsmType* initializer_type = nullptr; AsmType* initializer_type = nullptr;
if (literal->value()->ToUint32(&u32)) { if (literal->value()->ToUint32(&u32)) {
if (u32 > LargestFixNum) { if (u32 > LargestFixNum) {
@ -2793,13 +2879,17 @@ AsmType* AsmTyper::VariableTypeAnnotations(
"to fround."); "to fround.");
} }
-    // Float constants must contain dots in local, but not in globals.
-    if (mutability_type == VariableInfo::kLocal) {
-      if (!src_expr->raw_value()->ContainsDot()) {
-        FAIL(initializer,
-             "Invalid float type annotation - expected literal argument to be a "
-             "floating point literal.");
-      }
+    // ERRATA: 5.4
+    // According to the spec: float constants must contain dots in local,
+    // but not in globals.
+    // However, the errata doc (and actual programs), use integer values
+    // with fround(..).
+    // Skipping the check that would go here to enforce this.
+    // Checking instead the literal expression is at least a number.
+    if (!src_expr->raw_value()->IsNumber()) {
+      FAIL(initializer,
+           "Invalid float type annotation - expected numeric literal for call "
+           "to fround.");
     }
return AsmType::Float(); return AsmType::Float();
@ -2848,19 +2938,6 @@ AsmType* AsmTyper::NewHeapView(CallNew* new_heap_view) {
return heap_view_info->type(); return heap_view_info->type();
} }
bool IsValidAsm(Isolate* isolate, Zone* zone, Script* script,
FunctionLiteral* root, std::string* error_message) {
error_message->clear();
AsmTyper typer(isolate, zone, script, root);
if (typer.Validate()) {
return true;
}
*error_message = typer.error_message();
return false;
}
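
With the free IsValidAsm() helper deleted, callers construct an AsmTyper and drive it directly; failures now surface as a JSMessageObject plus a MessageLocation rather than a flat C string. A sketch of the equivalent check against the new interface (assumed wiring, based on the header changes below):

// Hypothetical stand-in for the deleted helper; the AsmTyper API is from
// this commit, the wrapper itself is illustrative.
bool CheckModuleIsValidAsm(Isolate* isolate, Zone* zone, Handle<Script> script,
                           FunctionLiteral* root) {
  AsmTyper typer(isolate, zone, script, root);
  if (typer.Validate()) return true;
  // On failure, typer.error_message() holds a Handle<JSMessageObject> and
  // typer.message_location() pinpoints the offending source range.
  return false;
}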
} // namespace wasm } // namespace wasm
} // namespace internal } // namespace internal
} // namespace v8 } // namespace v8
@ -7,6 +7,7 @@
#include <cstdint> #include <cstdint>
#include <string> #include <string>
#include <unordered_map>
#include <unordered_set> #include <unordered_set>
#include "src/allocation.h" #include "src/allocation.h"
@ -15,6 +16,7 @@
#include "src/ast/ast-types.h" #include "src/ast/ast-types.h"
#include "src/ast/ast.h" #include "src/ast/ast.h"
#include "src/effects.h" #include "src/effects.h"
#include "src/messages.h"
#include "src/type-info.h" #include "src/type-info.h"
#include "src/zone/zone-containers.h" #include "src/zone/zone-containers.h"
#include "src/zone/zone.h" #include "src/zone/zone.h"
@ -25,6 +27,7 @@ namespace wasm {
class AsmType; class AsmType;
class AsmTyperHarnessBuilder; class AsmTyperHarnessBuilder;
class SourceLayoutTracker;
class AsmTyper final { class AsmTyper final {
public: public:
@ -66,16 +69,27 @@ class AsmTyper final {
}; };
~AsmTyper() = default; ~AsmTyper() = default;
AsmTyper(Isolate* isolate, Zone* zone, Script* script, FunctionLiteral* root); AsmTyper(Isolate* isolate, Zone* zone, Handle<Script> script,
FunctionLiteral* root);
bool Validate(); bool Validate();
// Do asm.js validation in phases (to interleave with conversion to wasm).
bool ValidateBeforeFunctionsPhase();
bool ValidateInnerFunction(FunctionDeclaration* decl);
bool ValidateAfterFunctionsPhase();
void ClearFunctionNodeTypes();
const char* error_message() const { return error_message_; } Handle<JSMessageObject> error_message() const { return error_message_; }
const MessageLocation* message_location() const { return &message_location_; }
AsmType* TypeOf(AstNode* node) const; AsmType* TypeOf(AstNode* node) const;
AsmType* TypeOf(Variable* v) const; AsmType* TypeOf(Variable* v) const;
StandardMember VariableAsStandardMember(Variable* var); StandardMember VariableAsStandardMember(Variable* var);
// Allow the asm-wasm-builder to trigger failures (for interleaved
// validating).
AsmType* FailWithMessage(const char* text);
typedef std::unordered_set<StandardMember, std::hash<int> > StdlibSet; typedef std::unordered_set<StandardMember, std::hash<int> > StdlibSet;
StdlibSet StdlibUses() const { return stdlib_uses_; } StdlibSet StdlibUses() const { return stdlib_uses_; }
@ -130,7 +144,7 @@ class AsmTyper final {
bool IsHeap() const { return standard_member_ == kHeap; } bool IsHeap() const { return standard_member_ == kHeap; }
void MarkDefined() { missing_definition_ = false; } void MarkDefined() { missing_definition_ = false; }
void FirstForwardUseIs(VariableProxy* var); void SetFirstForwardUse(const MessageLocation& source_location);
StandardMember standard_member() const { return standard_member_; } StandardMember standard_member() const { return standard_member_; }
void set_standard_member(StandardMember standard_member) { void set_standard_member(StandardMember standard_member) {
@ -145,7 +159,7 @@ class AsmTyper final {
bool missing_definition() const { return missing_definition_; } bool missing_definition() const { return missing_definition_; }
VariableProxy* first_forward_use() const { return first_forward_use_; } const MessageLocation* source_location() { return &source_location_; }
static VariableInfo* ForSpecialSymbol(Zone* zone, static VariableInfo* ForSpecialSymbol(Zone* zone,
StandardMember standard_member); StandardMember standard_member);
@ -157,9 +171,8 @@ class AsmTyper final {
// missing_definition_ is set to true for forward definition - i.e., use // missing_definition_ is set to true for forward definition - i.e., use
// before definition. // before definition.
bool missing_definition_ = false; bool missing_definition_ = false;
// first_forward_use_ holds the AST node that first referenced this // Used for error messages.
// VariableInfo. Used for error messages. MessageLocation source_location_;
VariableProxy* first_forward_use_ = nullptr;
}; };
// RAII-style manager for the in_function_ member variable. // RAII-style manager for the in_function_ member variable.
@ -199,6 +212,40 @@ class AsmTyper final {
DISALLOW_IMPLICIT_CONSTRUCTORS(FlattenedStatements); DISALLOW_IMPLICIT_CONSTRUCTORS(FlattenedStatements);
}; };
class SourceLayoutTracker {
public:
SourceLayoutTracker() = default;
bool IsValid() const;
void AddUseAsm(const AstNode& node) { use_asm_.AddNewElement(node); }
void AddGlobal(const AstNode& node) { globals_.AddNewElement(node); }
void AddFunction(const AstNode& node) { functions_.AddNewElement(node); }
void AddTable(const AstNode& node) { tables_.AddNewElement(node); }
void AddExport(const AstNode& node) { exports_.AddNewElement(node); }
private:
class Section {
public:
Section() = default;
Section(const Section&) = default;
Section& operator=(const Section&) = default;
void AddNewElement(const AstNode& node);
bool IsPrecededBy(const Section& other) const;
private:
int start_ = kNoSourcePosition;
int end_ = kNoSourcePosition;
};
Section use_asm_;
Section globals_;
Section functions_;
Section tables_;
Section exports_;
DISALLOW_COPY_AND_ASSIGN(SourceLayoutTracker);
};
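
Only the class shape of SourceLayoutTracker appears in this header; the implementation lives in the .cc file. From the names, each Section plausibly records the source span of the statements added to it, and IsValid() checks that the module obeys the asm.js section order: "use asm", then globals, then functions, then function tables, then the export. A sketch under those assumptions (not the literal V8 implementation):

// Assumed implementation sketch; only the declarations above are from the diff.
void AsmTyper::SourceLayoutTracker::Section::AddNewElement(const AstNode& node) {
  const int node_pos = node.position();
  if (start_ == kNoSourcePosition || node_pos < start_) start_ = node_pos;
  if (end_ == kNoSourcePosition || node_pos > end_) end_ = node_pos;
}

bool AsmTyper::SourceLayoutTracker::Section::IsPrecededBy(
    const Section& other) const {
  if (start_ == kNoSourcePosition || other.end_ == kNoSourcePosition) {
    return true;  // an empty section imposes no ordering constraint
  }
  return other.end_ < start_;
}

bool AsmTyper::SourceLayoutTracker::IsValid() const {
  const Section* const sections[] = {&use_asm_, &globals_, &functions_,
                                     &tables_, &exports_};
  for (size_t i = 1; i < arraysize(sections); ++i) {
    for (size_t j = 0; j < i; ++j) {
      if (!sections[i]->IsPrecededBy(*sections[j])) return false;
    }
  }
  return true;
}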
using ObjectTypeMap = ZoneMap<std::string, VariableInfo*>; using ObjectTypeMap = ZoneMap<std::string, VariableInfo*>;
void InitializeStdlib(); void InitializeStdlib();
void SetTypeOf(AstNode* node, AsmType* type); void SetTypeOf(AstNode* node, AsmType* type);
@ -220,7 +267,10 @@ class AsmTyper final {
// validation failure. // validation failure.
// 6.1 ValidateModule // 6.1 ValidateModule
AsmType* ValidateModule(FunctionLiteral* fun); AsmType* ValidateModuleBeforeFunctionsPhase(FunctionLiteral* fun);
AsmType* ValidateModuleFunction(FunctionDeclaration* fun_decl);
AsmType* ValidateModuleFunctions(FunctionLiteral* fun);
AsmType* ValidateModuleAfterFunctionsPhase(FunctionLiteral* fun);
AsmType* ValidateGlobalDeclaration(Assignment* assign); AsmType* ValidateGlobalDeclaration(Assignment* assign);
// 6.2 ValidateExport // 6.2 ValidateExport
AsmType* ExportType(VariableProxy* fun_export); AsmType* ExportType(VariableProxy* fun_export);
@ -323,7 +373,7 @@ class AsmTyper final {
Isolate* isolate_; Isolate* isolate_;
Zone* zone_; Zone* zone_;
Script* script_; Handle<Script> script_;
FunctionLiteral* root_; FunctionLiteral* root_;
bool in_function_ = false; bool in_function_ = false;
@ -345,13 +395,19 @@ class AsmTyper final {
std::uintptr_t stack_limit_; std::uintptr_t stack_limit_;
bool stack_overflow_ = false; bool stack_overflow_ = false;
-  ZoneMap<AstNode*, AsmType*> node_types_;
+  std::unordered_map<AstNode*, AsmType*> module_node_types_;
+  std::unordered_map<AstNode*, AsmType*> function_node_types_;

-  static const int kErrorMessageLimit = 100;
+  static const int kErrorMessageLimit = 128;

   AsmType* fround_type_;
   AsmType* ffi_type_;

-  char error_message_[kErrorMessageLimit];
+  Handle<JSMessageObject> error_message_;
+  MessageLocation message_location_;
StdlibSet stdlib_uses_; StdlibSet stdlib_uses_;
SourceLayoutTracker source_layout_;
ReturnStatement* module_return_;
ZoneVector<Assignment*> function_pointer_tables_;
DISALLOW_IMPLICIT_CONSTRUCTORS(AsmTyper); DISALLOW_IMPLICIT_CONSTRUCTORS(AsmTyper);
}; };
@ -6,6 +6,7 @@
#include <cinttypes> #include <cinttypes>
#include "src/utils.h"
#include "src/v8.h" #include "src/v8.h"
namespace v8 { namespace v8 {
@ -19,6 +19,11 @@
#include "src/ast/ast.h" #include "src/ast/ast.h"
#include "src/ast/scopes.h" #include "src/ast/scopes.h"
#include "src/codegen.h"
#include "src/compilation-info.h"
#include "src/compiler.h"
#include "src/isolate.h"
#include "src/parsing/parse-info.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -37,13 +42,14 @@ enum ValueFate { kDrop, kLeaveOnStack };
struct ForeignVariable { struct ForeignVariable {
Handle<Name> name; Handle<Name> name;
Variable* var; Variable* var;
LocalType type; ValueType type;
}; };
class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> { class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
public: public:
AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, FunctionLiteral* literal, AsmWasmBuilderImpl(Isolate* isolate, Zone* zone, CompilationInfo* info,
AsmTyper* typer) AstValueFactory* ast_value_factory, Handle<Script> script,
FunctionLiteral* literal, AsmTyper* typer)
: local_variables_(ZoneHashMap::kDefaultHashMapCapacity, : local_variables_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)), ZoneAllocationPolicy(zone)),
functions_(ZoneHashMap::kDefaultHashMapCapacity, functions_(ZoneHashMap::kDefaultHashMapCapacity,
@ -56,15 +62,20 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
literal_(literal), literal_(literal),
isolate_(isolate), isolate_(isolate),
zone_(zone), zone_(zone),
info_(info),
ast_value_factory_(ast_value_factory),
script_(script),
typer_(typer), typer_(typer),
typer_failed_(false),
typer_finished_(false),
breakable_blocks_(zone), breakable_blocks_(zone),
foreign_variables_(zone), foreign_variables_(zone),
init_function_(nullptr), init_function_(nullptr),
foreign_init_function_(nullptr), foreign_init_function_(nullptr),
next_table_index_(0),
function_tables_(ZoneHashMap::kDefaultHashMapCapacity, function_tables_(ZoneHashMap::kDefaultHashMapCapacity,
ZoneAllocationPolicy(zone)), ZoneAllocationPolicy(zone)),
imported_function_table_(this) { imported_function_table_(this),
parent_binop_(nullptr) {
InitializeAstVisitor(isolate); InitializeAstVisitor(isolate);
} }
@ -90,10 +101,11 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
uint32_t index = LookupOrInsertGlobal(fv->var, fv->type); uint32_t index = LookupOrInsertGlobal(fv->var, fv->type);
foreign_init_function_->EmitWithVarInt(kExprSetGlobal, index); foreign_init_function_->EmitWithVarInt(kExprSetGlobal, index);
} }
foreign_init_function_->Emit(kExprEnd);
} }
i::Handle<i::FixedArray> GetForeignArgs() { Handle<FixedArray> GetForeignArgs() {
i::Handle<FixedArray> ret = isolate_->factory()->NewFixedArray( Handle<FixedArray> ret = isolate_->factory()->NewFixedArray(
static_cast<int>(foreign_variables_.size())); static_cast<int>(foreign_variables_.size()));
for (size_t i = 0; i < foreign_variables_.size(); ++i) { for (size_t i = 0; i < foreign_variables_.size(); ++i) {
ForeignVariable* fv = &foreign_variables_[i]; ForeignVariable* fv = &foreign_variables_[i];
@ -102,10 +114,26 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return ret; return ret;
} }
void Build() { bool Build() {
InitializeInitFunction(); InitializeInitFunction();
RECURSE(VisitFunctionLiteral(literal_)); if (!typer_->ValidateBeforeFunctionsPhase()) {
return false;
}
DCHECK(!HasStackOverflow());
VisitFunctionLiteral(literal_);
if (HasStackOverflow()) {
return false;
}
if (!typer_finished_ && !typer_failed_) {
typer_->FailWithMessage("Module missing export section.");
typer_failed_ = true;
}
if (typer_failed_) {
return false;
}
BuildForeignInitFunction(); BuildForeignInitFunction();
init_function_->Emit(kExprEnd); // finish init function.
return true;
} }
void VisitVariableDeclaration(VariableDeclaration* decl) {} void VisitVariableDeclaration(VariableDeclaration* decl) {}
@ -113,12 +141,65 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitFunctionDeclaration(FunctionDeclaration* decl) { void VisitFunctionDeclaration(FunctionDeclaration* decl) {
DCHECK_EQ(kModuleScope, scope_); DCHECK_EQ(kModuleScope, scope_);
DCHECK_NULL(current_function_builder_); DCHECK_NULL(current_function_builder_);
FunctionLiteral* old_func = decl->fun();
Zone zone(isolate_->allocator(), ZONE_NAME);
DeclarationScope* new_func_scope = nullptr;
if (decl->fun()->body() == nullptr) {
// TODO(titzer/bradnelson): Reuse SharedFunctionInfos used here when
// compiling the wasm module.
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfo(decl->fun(), script_, info_);
shared->set_is_toplevel(false);
ParseInfo info(&zone, script_);
info.set_shared_info(shared);
info.set_toplevel(false);
info.set_language_mode(decl->fun()->scope()->language_mode());
info.set_allow_lazy_parsing(false);
info.set_function_literal_id(shared->function_literal_id());
info.set_ast_value_factory(ast_value_factory_);
info.set_ast_value_factory_owned(false);
// Create fresh function scope to use to parse the function in.
new_func_scope = new (info.zone()) DeclarationScope(
info.zone(), decl->fun()->scope()->outer_scope(), FUNCTION_SCOPE);
info.set_asm_function_scope(new_func_scope);
if (!Compiler::ParseAndAnalyze(&info)) {
typer_failed_ = true;
return;
}
FunctionLiteral* func = info.literal();
DCHECK_NOT_NULL(func);
decl->set_fun(func);
}
if (!typer_->ValidateInnerFunction(decl)) {
typer_failed_ = true;
decl->set_fun(old_func);
if (new_func_scope != nullptr) {
DCHECK_EQ(new_func_scope, decl->scope()->inner_scope());
if (!decl->scope()->RemoveInnerScope(new_func_scope)) {
UNREACHABLE();
}
}
return;
}
current_function_builder_ = LookupOrInsertFunction(decl->proxy()->var()); current_function_builder_ = LookupOrInsertFunction(decl->proxy()->var());
scope_ = kFuncScope; scope_ = kFuncScope;
// Record start of the function, used as position for the stack check.
current_function_builder_->SetAsmFunctionStartPosition(
decl->fun()->start_position());
RECURSE(Visit(decl->fun())); RECURSE(Visit(decl->fun()));
decl->set_fun(old_func);
if (new_func_scope != nullptr) {
DCHECK_EQ(new_func_scope, decl->scope()->inner_scope());
if (!decl->scope()->RemoveInnerScope(new_func_scope)) {
UNREACHABLE();
}
}
scope_ = kModuleScope; scope_ = kModuleScope;
current_function_builder_ = nullptr; current_function_builder_ = nullptr;
local_variables_.Clear(); local_variables_.Clear();
typer_->ClearFunctionNodeTypes();
} }
void VisitStatements(ZoneList<Statement*>* stmts) { void VisitStatements(ZoneList<Statement*>* stmts) {
@ -129,7 +210,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
continue; continue;
} }
RECURSE(Visit(stmt)); RECURSE(Visit(stmt));
if (stmt->IsJump()) break; if (typer_failed_) break;
} }
} }
@ -204,6 +285,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); } void VisitEmptyParentheses(EmptyParentheses* paren) { UNREACHABLE(); }
void VisitGetIterator(GetIterator* expr) { UNREACHABLE(); }
void VisitIfStatement(IfStatement* stmt) { void VisitIfStatement(IfStatement* stmt) {
DCHECK_EQ(kFuncScope, scope_); DCHECK_EQ(kFuncScope, scope_);
RECURSE(Visit(stmt->condition())); RECURSE(Visit(stmt->condition()));
@ -245,6 +328,16 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitReturnStatement(ReturnStatement* stmt) { void VisitReturnStatement(ReturnStatement* stmt) {
if (scope_ == kModuleScope) { if (scope_ == kModuleScope) {
if (typer_finished_) {
typer_->FailWithMessage("Module has multiple returns.");
typer_failed_ = true;
return;
}
if (!typer_->ValidateAfterFunctionsPhase()) {
typer_failed_ = true;
return;
}
typer_finished_ = true;
scope_ = kExportScope; scope_ = kExportScope;
RECURSE(Visit(stmt->expression())); RECURSE(Visit(stmt->expression()));
scope_ = kModuleScope; scope_ = kModuleScope;
@ -440,16 +533,21 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
// Add the parameters for the function. // Add the parameters for the function.
const auto& arguments = func_type->Arguments(); const auto& arguments = func_type->Arguments();
for (int i = 0; i < expr->parameter_count(); ++i) { for (int i = 0; i < expr->parameter_count(); ++i) {
LocalType type = TypeFrom(arguments[i]); ValueType type = TypeFrom(arguments[i]);
DCHECK_NE(kAstStmt, type); DCHECK_NE(kWasmStmt, type);
InsertParameter(scope->parameter(i), type, i); InsertParameter(scope->parameter(i), type, i);
} }
} else { } else {
UNREACHABLE(); UNREACHABLE();
} }
} }
RECURSE(VisitStatements(expr->body()));
RECURSE(VisitDeclarations(scope->declarations())); RECURSE(VisitDeclarations(scope->declarations()));
if (typer_failed_) return;
RECURSE(VisitStatements(expr->body()));
if (scope_ == kFuncScope) {
// Finish the function-body scope block.
current_function_builder_->Emit(kExprEnd);
}
} }
void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) { void VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
@ -461,18 +559,18 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
RECURSE(Visit(expr->condition())); RECURSE(Visit(expr->condition()));
// WASM ifs come with implicit blocks for both arms. // WASM ifs come with implicit blocks for both arms.
breakable_blocks_.push_back(std::make_pair(nullptr, false)); breakable_blocks_.push_back(std::make_pair(nullptr, false));
LocalTypeCode type; ValueTypeCode type;
switch (TypeOf(expr)) { switch (TypeOf(expr)) {
case kAstI32: case kWasmI32:
type = kLocalI32; type = kLocalI32;
break; break;
case kAstI64: case kWasmI64:
type = kLocalI64; type = kLocalI64;
break; break;
case kAstF32: case kWasmF32:
type = kLocalF32; type = kLocalF32;
break; break;
case kAstF64: case kWasmF64:
type = kLocalF64; type = kLocalF64;
break; break;
default: default:
@ -544,8 +642,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (VisitStdlibConstant(var)) { if (VisitStdlibConstant(var)) {
return; return;
} }
LocalType var_type = TypeOf(expr); ValueType var_type = TypeOf(expr);
DCHECK_NE(kAstStmt, var_type); DCHECK_NE(kWasmStmt, var_type);
if (var->IsContextSlot()) { if (var->IsContextSlot()) {
current_function_builder_->EmitWithVarInt( current_function_builder_->EmitWithVarInt(
kExprGetGlobal, LookupOrInsertGlobal(var, var_type)); kExprGetGlobal, LookupOrInsertGlobal(var, var_type));
@ -638,12 +736,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Literal* name = prop->key()->AsLiteral(); Literal* name = prop->key()->AsLiteral();
DCHECK_NOT_NULL(name); DCHECK_NOT_NULL(name);
DCHECK(name->IsPropertyName()); DCHECK(name->IsPropertyName());
const AstRawString* raw_name = name->AsRawPropertyName(); Handle<String> function_name = name->AsPropertyName();
int length;
std::unique_ptr<char[]> utf8 = function_name->ToCString(
DISALLOW_NULLS, FAST_STRING_TRAVERSAL, &length);
if (var->is_function()) { if (var->is_function()) {
WasmFunctionBuilder* function = LookupOrInsertFunction(var); WasmFunctionBuilder* function = LookupOrInsertFunction(var);
function->Export(); function->ExportAs({utf8.get(), length});
function->SetName({reinterpret_cast<const char*>(raw_name->raw_data()),
raw_name->length()});
} }
} }
} }
@ -660,53 +759,67 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_ = nullptr; current_function_builder_ = nullptr;
} }
-  void AddFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
-    auto* func_tbl_type = typer_->TypeOf(funcs)->AsFunctionTableType();
-    DCHECK_NOT_NULL(func_tbl_type);
-    auto* func_type = func_tbl_type->signature()->AsFunctionType();
+  struct FunctionTableIndices : public ZoneObject {
+    uint32_t start_index;
+    uint32_t signature_index;
+  };
+
+  FunctionTableIndices* LookupOrAddFunctionTable(VariableProxy* table,
+                                                 Property* p) {
+    FunctionTableIndices* indices = LookupFunctionTable(table->var());
+    if (indices != nullptr) {
+      // Already setup.
+      return indices;
+    }
+    indices = new (zone()) FunctionTableIndices();
+    auto* func_type = typer_->TypeOf(p)->AsFunctionType();
+    auto* func_table_type = typer_->TypeOf(p->obj()->AsVariableProxy()->var())
+                                ->AsFunctionTableType();
     const auto& arguments = func_type->Arguments();
-    LocalType return_type = TypeFrom(func_type->ReturnType());
-    FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1,
+    ValueType return_type = TypeFrom(func_type->ReturnType());
+    FunctionSig::Builder sig(zone(), return_type == kWasmStmt ? 0 : 1,
                              arguments.size());
-    if (return_type != kAstStmt) {
+    if (return_type != kWasmStmt) {
       sig.AddReturn(return_type);
     }
     for (auto* arg : arguments) {
       sig.AddParam(TypeFrom(arg));
     }
     uint32_t signature_index = builder_->AddSignature(sig.Build());
-    InsertFunctionTable(table->var(), next_table_index_, signature_index);
-    next_table_index_ += funcs->values()->length();
-    for (int i = 0; i < funcs->values()->length(); ++i) {
-      VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
-      DCHECK_NOT_NULL(func);
-      builder_->AddIndirectFunction(
-          LookupOrInsertFunction(func->var())->func_index());
-    }
-  }
-
-  struct FunctionTableIndices : public ZoneObject {
-    uint32_t start_index;
-    uint32_t signature_index;
-  };
-
-  void InsertFunctionTable(Variable* v, uint32_t start_index,
-                           uint32_t signature_index) {
-    FunctionTableIndices* container = new (zone()) FunctionTableIndices();
-    container->start_index = start_index;
-    container->signature_index = signature_index;
+    indices->start_index = builder_->AllocateIndirectFunctions(
+        static_cast<uint32_t>(func_table_type->length()));
+    indices->signature_index = signature_index;
     ZoneHashMap::Entry* entry = function_tables_.LookupOrInsert(
-        v, ComputePointerHash(v), ZoneAllocationPolicy(zone()));
-    entry->value = container;
+        table->var(), ComputePointerHash(table->var()),
+        ZoneAllocationPolicy(zone()));
+    entry->value = indices;
+    return indices;
   }

   FunctionTableIndices* LookupFunctionTable(Variable* v) {
     ZoneHashMap::Entry* entry =
         function_tables_.Lookup(v, ComputePointerHash(v));
-    DCHECK_NOT_NULL(entry);
+    if (entry == nullptr) {
+      return nullptr;
+    }
     return reinterpret_cast<FunctionTableIndices*>(entry->value);
   }
void PopulateFunctionTable(VariableProxy* table, ArrayLiteral* funcs) {
FunctionTableIndices* indices = LookupFunctionTable(table->var());
// Ignore unused function tables.
if (indices == nullptr) {
return;
}
for (int i = 0; i < funcs->values()->length(); ++i) {
VariableProxy* func = funcs->values()->at(i)->AsVariableProxy();
DCHECK_NOT_NULL(func);
builder_->SetIndirectFunction(
indices->start_index + i,
LookupOrInsertFunction(func->var())->func_index());
}
}
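
Function tables thus move from eager registration (the old AddFunctionTable, at the array literal) to reserve-then-fill: the first indirect call through a table reserves a contiguous index range sized from the table's type via AllocateIndirectFunctions, and PopulateFunctionTable later fills the reserved slots with SetIndirectFunction; a table that is never called through is never allocated at all. A self-contained toy model of that index bookkeeping (illustrative only, not V8 code):

#include <cstdint>
#include <vector>

// Toy model of reserve-then-fill indirect-function indices.
class IndirectFunctionSpace {
 public:
  // Reserve `count` contiguous slots up front; contents come later.
  uint32_t Allocate(uint32_t count) {
    uint32_t start = static_cast<uint32_t>(slots_.size());
    slots_.resize(slots_.size() + count, kUnset);
    return start;
  }
  // Fill one reserved slot with a concrete function index.
  void Set(uint32_t index, uint32_t func_index) { slots_[index] = func_index; }

 private:
  static constexpr uint32_t kUnset = ~0u;
  std::vector<uint32_t> slots_;
};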
class ImportedFunctionTable { class ImportedFunctionTable {
private: private:
class ImportedFunctionIndices : public ZoneObject { class ImportedFunctionIndices : public ZoneObject {
@ -727,20 +840,33 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
ZoneAllocationPolicy(builder->zone())), ZoneAllocationPolicy(builder->zone())),
builder_(builder) {} builder_(builder) {}
-    void AddImport(Variable* v, const char* name, int name_length) {
-      ImportedFunctionIndices* indices = new (builder_->zone())
-          ImportedFunctionIndices(name, name_length, builder_->zone());
+    ImportedFunctionIndices* LookupOrInsertImport(Variable* v) {
       auto* entry = table_.LookupOrInsert(
           v, ComputePointerHash(v), ZoneAllocationPolicy(builder_->zone()));
-      entry->value = indices;
+      ImportedFunctionIndices* indices;
+      if (entry->value == nullptr) {
+        indices = new (builder_->zone())
+            ImportedFunctionIndices(nullptr, 0, builder_->zone());
+        entry->value = indices;
+      } else {
+        indices = reinterpret_cast<ImportedFunctionIndices*>(entry->value);
+      }
+      return indices;
+    }
+
+    void SetImportName(Variable* v, const char* name, int name_length) {
+      auto* indices = LookupOrInsertImport(v);
+      indices->name_ = name;
+      indices->name_length_ = name_length;
+      for (auto i : indices->signature_to_index_) {
+        builder_->builder_->SetImportName(i.second, indices->name_,
+                                          indices->name_length_);
+      }
     }

     // Get a function's index (or allocate if new).
-    uint32_t LookupOrInsertImport(Variable* v, FunctionSig* sig) {
-      ZoneHashMap::Entry* entry = table_.Lookup(v, ComputePointerHash(v));
-      DCHECK_NOT_NULL(entry);
-      ImportedFunctionIndices* indices =
-          reinterpret_cast<ImportedFunctionIndices*>(entry->value);
+    uint32_t LookupOrInsertImportUse(Variable* v, FunctionSig* sig) {
+      auto* indices = LookupOrInsertImport(v);
WasmModuleBuilder::SignatureMap::iterator pos = WasmModuleBuilder::SignatureMap::iterator pos =
indices->signature_to_index_.find(sig); indices->signature_to_index_.find(sig);
if (pos != indices->signature_to_index_.end()) { if (pos != indices->signature_to_index_.end()) {
@ -819,8 +945,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (target_var != nullptr) { if (target_var != nullptr) {
// Left hand side is a local or a global variable. // Left hand side is a local or a global variable.
Variable* var = target_var->var(); Variable* var = target_var->var();
LocalType var_type = TypeOf(expr); ValueType var_type = TypeOf(expr);
DCHECK_NE(kAstStmt, var_type); DCHECK_NE(kWasmStmt, var_type);
if (var->IsContextSlot()) { if (var->IsContextSlot()) {
uint32_t index = LookupOrInsertGlobal(var, var_type); uint32_t index = LookupOrInsertGlobal(var, var_type);
current_function_builder_->EmitWithVarInt(kExprSetGlobal, index); current_function_builder_->EmitWithVarInt(kExprSetGlobal, index);
@ -841,7 +967,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Property* target_prop = expr->target()->AsProperty(); Property* target_prop = expr->target()->AsProperty();
if (target_prop != nullptr) { if (target_prop != nullptr) {
// Left hand side is a property access, i.e. the asm.js heap. // Left hand side is a property access, i.e. the asm.js heap.
if (TypeOf(expr->value()) == kAstF64 && expr->target()->IsProperty() && if (TypeOf(expr->value()) == kWasmF64 && expr->target()->IsProperty() &&
typer_->TypeOf(expr->target()->AsProperty()->obj()) typer_->TypeOf(expr->target()->AsProperty()->obj())
->IsA(AsmType::Float32Array())) { ->IsA(AsmType::Float32Array())) {
current_function_builder_->Emit(kExprF32ConvertF64); current_function_builder_->Emit(kExprF32ConvertF64);
@ -901,7 +1027,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (typer_->TypeOf(target)->AsFFIType() != nullptr) { if (typer_->TypeOf(target)->AsFFIType() != nullptr) {
const AstRawString* name = const AstRawString* name =
prop->key()->AsLiteral()->AsRawPropertyName(); prop->key()->AsLiteral()->AsRawPropertyName();
imported_function_table_.AddImport( imported_function_table_.SetImportName(
target->var(), reinterpret_cast<const char*>(name->raw_data()), target->var(), reinterpret_cast<const char*>(name->raw_data()),
name->length()); name->length());
} }
@ -910,14 +1036,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
return; return;
} }
ArrayLiteral* funcs = expr->value()->AsArrayLiteral(); ArrayLiteral* funcs = expr->value()->AsArrayLiteral();
if (funcs != nullptr && if (funcs != nullptr) {
typer_->TypeOf(funcs)
->AsFunctionTableType()
->signature()
->AsFunctionType()) {
VariableProxy* target = expr->target()->AsVariableProxy(); VariableProxy* target = expr->target()->AsVariableProxy();
DCHECK_NOT_NULL(target); DCHECK_NOT_NULL(target);
AddFunctionTable(target, funcs); PopulateFunctionTable(target, funcs);
// Only add to the function table. No init needed. // Only add to the function table. No init needed.
return; return;
} }
@ -952,8 +1074,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
DCHECK_NOT_NULL(key_literal); DCHECK_NOT_NULL(key_literal);
if (!key_literal->value().is_null()) { if (!key_literal->value().is_null()) {
Handle<Name> name = Handle<Name> name =
i::Object::ToName(isolate_, key_literal->value()).ToHandleChecked(); Object::ToName(isolate_, key_literal->value()).ToHandleChecked();
LocalType type = is_float ? kAstF64 : kAstI32; ValueType type = is_float ? kWasmF64 : kWasmI32;
foreign_variables_.push_back({name, var, type}); foreign_variables_.push_back({name, var, type});
} }
} }
@ -961,7 +1083,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
void VisitPropertyAndEmitIndex(Property* expr, AsmType** atype) { void VisitPropertyAndEmitIndex(Property* expr, AsmType** atype) {
Expression* obj = expr->obj(); Expression* obj = expr->obj();
*atype = typer_->TypeOf(obj); *atype = typer_->TypeOf(obj);
int size = (*atype)->ElementSizeInBytes(); int32_t size = (*atype)->ElementSizeInBytes();
if (size == 1) { if (size == 1) {
// Allow more general expression in byte arrays than the spec // Allow more general expression in byte arrays than the spec
// strictly permits. // strictly permits.
@ -974,7 +1096,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
Literal* value = expr->key()->AsLiteral(); Literal* value = expr->key()->AsLiteral();
if (value) { if (value) {
DCHECK(value->raw_value()->IsNumber()); DCHECK(value->raw_value()->IsNumber());
DCHECK_EQ(kAstI32, TypeOf(value)); DCHECK_EQ(kWasmI32, TypeOf(value));
int32_t val = static_cast<int32_t>(value->raw_value()->AsNumber()); int32_t val = static_cast<int32_t>(value->raw_value()->AsNumber());
// TODO(titzer): handle overflow here. // TODO(titzer): handle overflow here.
current_function_builder_->EmitI32Const(val * size); current_function_builder_->EmitI32Const(val * size);
@ -984,14 +1106,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
if (binop) { if (binop) {
DCHECK_EQ(Token::SAR, binop->op()); DCHECK_EQ(Token::SAR, binop->op());
DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber()); DCHECK(binop->right()->AsLiteral()->raw_value()->IsNumber());
DCHECK(kAstI32 == TypeOf(binop->right()->AsLiteral())); DCHECK(kWasmI32 == TypeOf(binop->right()->AsLiteral()));
DCHECK_EQ(size, DCHECK_EQ(size,
1 << static_cast<int>( 1 << static_cast<int>(
binop->right()->AsLiteral()->raw_value()->AsNumber())); binop->right()->AsLiteral()->raw_value()->AsNumber()));
// Mask bottom bits to match asm.js behavior. // Mask bottom bits to match asm.js behavior.
byte mask = static_cast<byte>(~(size - 1));
RECURSE(Visit(binop->left())); RECURSE(Visit(binop->left()));
current_function_builder_->EmitWithU8(kExprI8Const, mask); current_function_builder_->EmitI32Const(~(size - 1));
current_function_builder_->Emit(kExprI32And); current_function_builder_->Emit(kExprI32And);
return; return;
} }
@ -1030,7 +1151,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
AsmTyper::StandardMember standard_object = AsmTyper::StandardMember standard_object =
typer_->VariableAsStandardMember(var); typer_->VariableAsStandardMember(var);
ZoneList<Expression*>* args = call->arguments(); ZoneList<Expression*>* args = call->arguments();
LocalType call_type = TypeOf(call); ValueType call_type = TypeOf(call);
switch (standard_object) { switch (standard_object) {
case AsmTyper::kNone: { case AsmTyper::kNone: {
@ -1038,57 +1159,57 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
} }
case AsmTyper::kMathAcos: { case AsmTyper::kMathAcos: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type); DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Acos); current_function_builder_->Emit(kExprF64Acos);
break; break;
} }
case AsmTyper::kMathAsin: { case AsmTyper::kMathAsin: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type); DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Asin); current_function_builder_->Emit(kExprF64Asin);
break; break;
} }
case AsmTyper::kMathAtan: { case AsmTyper::kMathAtan: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type); DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Atan); current_function_builder_->Emit(kExprF64Atan);
break; break;
} }
case AsmTyper::kMathCos: { case AsmTyper::kMathCos: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type); DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Cos); current_function_builder_->Emit(kExprF64Cos);
break; break;
} }
case AsmTyper::kMathSin: { case AsmTyper::kMathSin: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type); DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Sin); current_function_builder_->Emit(kExprF64Sin);
break; break;
} }
case AsmTyper::kMathTan: { case AsmTyper::kMathTan: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type); DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Tan); current_function_builder_->Emit(kExprF64Tan);
break; break;
} }
case AsmTyper::kMathExp: { case AsmTyper::kMathExp: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type); DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Exp); current_function_builder_->Emit(kExprF64Exp);
break; break;
} }
case AsmTyper::kMathLog: { case AsmTyper::kMathLog: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type); DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Log); current_function_builder_->Emit(kExprF64Log);
break; break;
} }
case AsmTyper::kMathCeil: { case AsmTyper::kMathCeil: {
VisitCallArgs(call); VisitCallArgs(call);
if (call_type == kAstF32) { if (call_type == kWasmF32) {
current_function_builder_->Emit(kExprF32Ceil); current_function_builder_->Emit(kExprF32Ceil);
} else if (call_type == kAstF64) { } else if (call_type == kWasmF64) {
current_function_builder_->Emit(kExprF64Ceil); current_function_builder_->Emit(kExprF64Ceil);
} else { } else {
UNREACHABLE(); UNREACHABLE();
@ -1097,9 +1218,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
} }
case AsmTyper::kMathFloor: { case AsmTyper::kMathFloor: {
VisitCallArgs(call); VisitCallArgs(call);
if (call_type == kAstF32) { if (call_type == kWasmF32) {
current_function_builder_->Emit(kExprF32Floor); current_function_builder_->Emit(kExprF32Floor);
} else if (call_type == kAstF64) { } else if (call_type == kWasmF64) {
current_function_builder_->Emit(kExprF64Floor); current_function_builder_->Emit(kExprF64Floor);
} else { } else {
UNREACHABLE(); UNREACHABLE();
@ -1108,9 +1229,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
} }
case AsmTyper::kMathSqrt: { case AsmTyper::kMathSqrt: {
VisitCallArgs(call); VisitCallArgs(call);
if (call_type == kAstF32) { if (call_type == kWasmF32) {
current_function_builder_->Emit(kExprF32Sqrt); current_function_builder_->Emit(kExprF32Sqrt);
} else if (call_type == kAstF64) { } else if (call_type == kWasmF64) {
current_function_builder_->Emit(kExprF64Sqrt); current_function_builder_->Emit(kExprF64Sqrt);
} else { } else {
UNREACHABLE(); UNREACHABLE();
@ -1119,18 +1240,18 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
} }
case AsmTyper::kMathClz32: { case AsmTyper::kMathClz32: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK(call_type == kAstI32); DCHECK(call_type == kWasmI32);
current_function_builder_->Emit(kExprI32Clz); current_function_builder_->Emit(kExprI32Clz);
break; break;
} }
case AsmTyper::kMathAbs: { case AsmTyper::kMathAbs: {
if (call_type == kAstI32) { if (call_type == kWasmI32) {
WasmTemporary tmp(current_function_builder_, kAstI32); WasmTemporary tmp(current_function_builder_, kWasmI32);
// if set_local(tmp, x) < 0 // if set_local(tmp, x) < 0
Visit(call->arguments()->at(0)); Visit(call->arguments()->at(0));
current_function_builder_->EmitTeeLocal(tmp.index()); current_function_builder_->EmitTeeLocal(tmp.index());
byte code[] = {WASM_I8(0)}; byte code[] = {WASM_ZERO};
current_function_builder_->EmitCode(code, sizeof(code)); current_function_builder_->EmitCode(code, sizeof(code));
current_function_builder_->Emit(kExprI32LtS); current_function_builder_->Emit(kExprI32LtS);
current_function_builder_->EmitWithU8(kExprIf, kLocalI32); current_function_builder_->EmitWithU8(kExprIf, kLocalI32);
@ -1146,10 +1267,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
// end // end
current_function_builder_->Emit(kExprEnd); current_function_builder_->Emit(kExprEnd);
} else if (call_type == kAstF32) { } else if (call_type == kWasmF32) {
VisitCallArgs(call); VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Abs); current_function_builder_->Emit(kExprF32Abs);
} else if (call_type == kAstF64) { } else if (call_type == kWasmF64) {
VisitCallArgs(call); VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Abs); current_function_builder_->Emit(kExprF64Abs);
} else { } else {
@ -1159,9 +1280,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
} }
case AsmTyper::kMathMin: { case AsmTyper::kMathMin: {
// TODO(bradnelson): Change wasm to match Math.min in asm.js mode. // TODO(bradnelson): Change wasm to match Math.min in asm.js mode.
if (call_type == kAstI32) { if (call_type == kWasmI32) {
WasmTemporary tmp_x(current_function_builder_, kAstI32); WasmTemporary tmp_x(current_function_builder_, kWasmI32);
WasmTemporary tmp_y(current_function_builder_, kAstI32); WasmTemporary tmp_y(current_function_builder_, kWasmI32);
// if set_local(tmp_x, x) < set_local(tmp_y, y) // if set_local(tmp_x, x) < set_local(tmp_y, y)
Visit(call->arguments()->at(0)); Visit(call->arguments()->at(0));
@ -1181,10 +1302,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->EmitGetLocal(tmp_y.index()); current_function_builder_->EmitGetLocal(tmp_y.index());
current_function_builder_->Emit(kExprEnd); current_function_builder_->Emit(kExprEnd);
} else if (call_type == kAstF32) { } else if (call_type == kWasmF32) {
VisitCallArgs(call); VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Min); current_function_builder_->Emit(kExprF32Min);
} else if (call_type == kAstF64) { } else if (call_type == kWasmF64) {
VisitCallArgs(call); VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Min); current_function_builder_->Emit(kExprF64Min);
} else { } else {
@ -1194,9 +1315,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
} }
case AsmTyper::kMathMax: { case AsmTyper::kMathMax: {
// TODO(bradnelson): Change wasm to match Math.max in asm.js mode. // TODO(bradnelson): Change wasm to match Math.max in asm.js mode.
if (call_type == kAstI32) { if (call_type == kWasmI32) {
WasmTemporary tmp_x(current_function_builder_, kAstI32); WasmTemporary tmp_x(current_function_builder_, kWasmI32);
WasmTemporary tmp_y(current_function_builder_, kAstI32); WasmTemporary tmp_y(current_function_builder_, kWasmI32);
// if set_local(tmp_x, x) < set_local(tmp_y, y) // if set_local(tmp_x, x) < set_local(tmp_y, y)
Visit(call->arguments()->at(0)); Visit(call->arguments()->at(0));
@ -1217,10 +1338,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
current_function_builder_->EmitGetLocal(tmp_x.index()); current_function_builder_->EmitGetLocal(tmp_x.index());
current_function_builder_->Emit(kExprEnd); current_function_builder_->Emit(kExprEnd);
} else if (call_type == kAstF32) { } else if (call_type == kWasmF32) {
VisitCallArgs(call); VisitCallArgs(call);
current_function_builder_->Emit(kExprF32Max); current_function_builder_->Emit(kExprF32Max);
} else if (call_type == kAstF64) { } else if (call_type == kWasmF64) {
VisitCallArgs(call); VisitCallArgs(call);
current_function_builder_->Emit(kExprF64Max); current_function_builder_->Emit(kExprF64Max);
} else { } else {
@ -1230,13 +1351,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
} }
case AsmTyper::kMathAtan2: { case AsmTyper::kMathAtan2: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type); DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Atan2); current_function_builder_->Emit(kExprF64Atan2);
break; break;
} }
case AsmTyper::kMathPow: { case AsmTyper::kMathPow: {
VisitCallArgs(call); VisitCallArgs(call);
DCHECK_EQ(kAstF64, call_type); DCHECK_EQ(kWasmF64, call_type);
current_function_builder_->Emit(kExprF64Pow); current_function_builder_->Emit(kExprF64Pow);
break; break;
} }
@ -1298,6 +1419,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
bool VisitCallExpression(Call* expr) { bool VisitCallExpression(Call* expr) {
Call::CallType call_type = expr->GetCallType(); Call::CallType call_type = expr->GetCallType();
bool returns_value = true; bool returns_value = true;
// Save the parent now, it might be overwritten in VisitCallArgs.
BinaryOperation* parent_binop = parent_binop_;
switch (call_type) { switch (call_type) {
case Call::OTHER_CALL: { case Call::OTHER_CALL: {
VariableProxy* proxy = expr->expression()->AsVariableProxy(); VariableProxy* proxy = expr->expression()->AsVariableProxy();
@ -1313,11 +1438,11 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
VariableProxy* vp = expr->expression()->AsVariableProxy(); VariableProxy* vp = expr->expression()->AsVariableProxy();
DCHECK_NOT_NULL(vp); DCHECK_NOT_NULL(vp);
if (typer_->TypeOf(vp)->AsFFIType() != nullptr) { if (typer_->TypeOf(vp)->AsFFIType() != nullptr) {
LocalType return_type = TypeOf(expr); ValueType return_type = TypeOf(expr);
ZoneList<Expression*>* args = expr->arguments(); ZoneList<Expression*>* args = expr->arguments();
FunctionSig::Builder sig(zone(), return_type == kAstStmt ? 0 : 1, FunctionSig::Builder sig(zone(), return_type == kWasmStmt ? 0 : 1,
args->length()); args->length());
if (return_type != kAstStmt) { if (return_type != kWasmStmt) {
sig.AddReturn(return_type); sig.AddReturn(return_type);
} else { } else {
returns_value = false; returns_value = false;
@ -1325,16 +1450,23 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
for (int i = 0; i < args->length(); ++i) { for (int i = 0; i < args->length(); ++i) {
sig.AddParam(TypeOf(args->at(i))); sig.AddParam(TypeOf(args->at(i)));
} }
uint32_t index = imported_function_table_.LookupOrInsertImport( uint32_t index = imported_function_table_.LookupOrInsertImportUse(
vp->var(), sig.Build()); vp->var(), sig.Build());
VisitCallArgs(expr); VisitCallArgs(expr);
current_function_builder_->AddAsmWasmOffset(expr->position()); // For non-void functions, we must know the parent node.
DCHECK_IMPLIES(returns_value, parent_binop != nullptr);
DCHECK_IMPLIES(returns_value, parent_binop->left() == expr ||
parent_binop->right() == expr);
int pos = expr->position();
int parent_pos = returns_value ? parent_binop->position() : pos;
current_function_builder_->AddAsmWasmOffset(pos, parent_pos);
current_function_builder_->Emit(kExprCallFunction); current_function_builder_->Emit(kExprCallFunction);
current_function_builder_->EmitVarInt(index); current_function_builder_->EmitVarInt(index);
} else { } else {
WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var()); WasmFunctionBuilder* function = LookupOrInsertFunction(vp->var());
VisitCallArgs(expr); VisitCallArgs(expr);
current_function_builder_->AddAsmWasmOffset(expr->position()); current_function_builder_->AddAsmWasmOffset(expr->position(),
expr->position());
current_function_builder_->Emit(kExprCallFunction); current_function_builder_->Emit(kExprCallFunction);
current_function_builder_->EmitDirectCallIndex( current_function_builder_->EmitDirectCallIndex(
function->func_index()); function->func_index());
@@ -1348,19 +1480,20 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
     DCHECK_NOT_NULL(p);
     VariableProxy* var = p->obj()->AsVariableProxy();
     DCHECK_NOT_NULL(var);
-    FunctionTableIndices* indices = LookupFunctionTable(var->var());
+    FunctionTableIndices* indices = LookupOrAddFunctionTable(var, p);
     Visit(p->key());  // TODO(titzer): should use RECURSE()
     // We have to use a temporary for the correct order of evaluation.
     current_function_builder_->EmitI32Const(indices->start_index);
     current_function_builder_->Emit(kExprI32Add);
-    WasmTemporary tmp(current_function_builder_, kAstI32);
+    WasmTemporary tmp(current_function_builder_, kWasmI32);
     current_function_builder_->EmitSetLocal(tmp.index());
     VisitCallArgs(expr);
     current_function_builder_->EmitGetLocal(tmp.index());
-    current_function_builder_->AddAsmWasmOffset(expr->position());
+    current_function_builder_->AddAsmWasmOffset(expr->position(),
+                                                expr->position());
     current_function_builder_->Emit(kExprCallIndirect);
     current_function_builder_->EmitVarInt(indices->signature_index);
     current_function_builder_->EmitVarInt(0);  // table index
@@ -1383,7 +1516,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
     RECURSE(Visit(expr->expression()));
     switch (expr->op()) {
       case Token::NOT: {
-        DCHECK_EQ(kAstI32, TypeOf(expr->expression()));
+        DCHECK_EQ(kWasmI32, TypeOf(expr->expression()));
         current_function_builder_->Emit(kExprI32Eqz);
         break;
       }
@@ -1398,10 +1531,10 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
                               int32_t val) {
     DCHECK_NOT_NULL(expr->right());
     if (expr->op() == op && expr->right()->IsLiteral() &&
-        TypeOf(expr) == kAstI32) {
+        TypeOf(expr) == kWasmI32) {
       Literal* right = expr->right()->AsLiteral();
-      DCHECK(right->raw_value()->IsNumber());
-      if (static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
+      if (right->raw_value()->IsNumber() &&
+          static_cast<int32_t>(right->raw_value()->AsNumber()) == val) {
         return true;
       }
     }
@@ -1412,7 +1545,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
                                  double val) {
     DCHECK_NOT_NULL(expr->right());
     if (expr->op() == op && expr->right()->IsLiteral() &&
-        TypeOf(expr) == kAstF64) {
+        TypeOf(expr) == kWasmF64) {
       Literal* right = expr->right()->AsLiteral();
       DCHECK(right->raw_value()->IsNumber());
       if (right->raw_value()->AsNumber() == val) {
@@ -1426,7 +1559,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
   ConvertOperation MatchOr(BinaryOperation* expr) {
     if (MatchIntBinaryOperation(expr, Token::BIT_OR, 0) &&
-        (TypeOf(expr->left()) == kAstI32)) {
+        (TypeOf(expr->left()) == kWasmI32)) {
       return kAsIs;
     } else {
       return kNone;
@@ -1436,7 +1569,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
   ConvertOperation MatchShr(BinaryOperation* expr) {
     if (MatchIntBinaryOperation(expr, Token::SHR, 0)) {
       // TODO(titzer): this probably needs to be kToUint
-      return (TypeOf(expr->left()) == kAstI32) ? kAsIs : kToInt;
+      return (TypeOf(expr->left()) == kWasmI32) ? kAsIs : kToInt;
     } else {
       return kNone;
     }
@@ -1444,13 +1577,13 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
   ConvertOperation MatchXor(BinaryOperation* expr) {
     if (MatchIntBinaryOperation(expr, Token::BIT_XOR, 0xffffffff)) {
-      DCHECK_EQ(kAstI32, TypeOf(expr->left()));
-      DCHECK_EQ(kAstI32, TypeOf(expr->right()));
+      DCHECK_EQ(kWasmI32, TypeOf(expr->left()));
+      DCHECK_EQ(kWasmI32, TypeOf(expr->right()));
       BinaryOperation* op = expr->left()->AsBinaryOperation();
       if (op != nullptr) {
         if (MatchIntBinaryOperation(op, Token::BIT_XOR, 0xffffffff)) {
-          DCHECK_EQ(kAstI32, TypeOf(op->right()));
-          if (TypeOf(op->left()) != kAstI32) {
+          DCHECK_EQ(kWasmI32, TypeOf(op->right()));
+          if (TypeOf(op->left()) != kWasmI32) {
             return kToInt;
           } else {
             return kAsIs;
@@ -1463,8 +1596,8 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
   ConvertOperation MatchMul(BinaryOperation* expr) {
     if (MatchDoubleBinaryOperation(expr, Token::MUL, 1.0)) {
-      DCHECK_EQ(kAstF64, TypeOf(expr->right()));
-      if (TypeOf(expr->left()) != kAstF64) {
+      DCHECK_EQ(kWasmF64, TypeOf(expr->right()));
+      if (TypeOf(expr->left()) != kWasmF64) {
         return kToDouble;
       } else {
         return kAsIs;
@@ -1532,6 +1665,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
   void VisitBinaryOperation(BinaryOperation* expr) {
     ConvertOperation convertOperation = MatchBinaryOperation(expr);
     static const bool kDontIgnoreSign = false;
+    parent_binop_ = expr;
     if (convertOperation == kToDouble) {
       RECURSE(Visit(expr->left()));
       TypeIndex type = TypeIndexOf(expr->left(), kDontIgnoreSign);
@@ -1694,6 +1828,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
   void VisitDeclarations(Declaration::List* decls) {
     for (Declaration* decl : *decls) {
       RECURSE(Visit(decl));
+      if (typer_failed_) {
+        return;
+      }
     }
   }
@@ -1719,7 +1856,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
     uint32_t index;
   };
-  uint32_t LookupOrInsertLocal(Variable* v, LocalType type) {
+  uint32_t LookupOrInsertLocal(Variable* v, ValueType type) {
     DCHECK_NOT_NULL(current_function_builder_);
     ZoneHashMap::Entry* entry =
         local_variables_.Lookup(v, ComputePointerHash(v));
@@ -1736,7 +1873,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
     return (reinterpret_cast<IndexContainer*>(entry->value))->index;
   }
-  void InsertParameter(Variable* v, LocalType type, uint32_t index) {
+  void InsertParameter(Variable* v, ValueType type, uint32_t index) {
     DCHECK(v->IsParameter());
     DCHECK_NOT_NULL(current_function_builder_);
     ZoneHashMap::Entry* entry =
@@ -1749,7 +1886,7 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
     entry->value = container;
   }
-  uint32_t LookupOrInsertGlobal(Variable* v, LocalType type) {
+  uint32_t LookupOrInsertGlobal(Variable* v, ValueType type) {
     ZoneHashMap::Entry* entry =
         global_variables_.Lookup(v, ComputePointerHash(v));
     if (entry == nullptr) {
@@ -1770,14 +1907,14 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
     auto* func_type = typer_->TypeOf(v)->AsFunctionType();
     DCHECK_NOT_NULL(func_type);
     // Build the signature for the function.
-    LocalType return_type = TypeFrom(func_type->ReturnType());
+    ValueType return_type = TypeFrom(func_type->ReturnType());
     const auto& arguments = func_type->Arguments();
-    FunctionSig::Builder b(zone(), return_type == kAstStmt ? 0 : 1,
+    FunctionSig::Builder b(zone(), return_type == kWasmStmt ? 0 : 1,
                            arguments.size());
-    if (return_type != kAstStmt) b.AddReturn(return_type);
+    if (return_type != kWasmStmt) b.AddReturn(return_type);
     for (int i = 0; i < static_cast<int>(arguments.size()); ++i) {
-      LocalType type = TypeFrom(arguments[i]);
-      DCHECK_NE(kAstStmt, type);
+      ValueType type = TypeFrom(arguments[i]);
+      DCHECK_NE(kWasmStmt, type);
       b.AddParam(type);
     }
@@ -1792,22 +1929,22 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
     return (reinterpret_cast<WasmFunctionBuilder*>(entry->value));
   }
-  LocalType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
+  ValueType TypeOf(Expression* expr) { return TypeFrom(typer_->TypeOf(expr)); }
-  LocalType TypeFrom(AsmType* type) {
+  ValueType TypeFrom(AsmType* type) {
     if (type->IsA(AsmType::Intish())) {
-      return kAstI32;
+      return kWasmI32;
     }
     if (type->IsA(AsmType::Floatish())) {
-      return kAstF32;
+      return kWasmF32;
     }
     if (type->IsA(AsmType::DoubleQ())) {
-      return kAstF64;
+      return kWasmF64;
     }
-    return kAstStmt;
+    return kWasmStmt;
   }
   Zone* zone() { return zone_; }
@@ -1821,7 +1958,12 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
   FunctionLiteral* literal_;
   Isolate* isolate_;
   Zone* zone_;
+  CompilationInfo* info_;
+  AstValueFactory* ast_value_factory_;
+  Handle<Script> script_;
   AsmTyper* typer_;
+  bool typer_failed_;
+  bool typer_finished_;
   ZoneVector<std::pair<BreakableStatement*, bool>> breakable_blocks_;
   ZoneVector<ForeignVariable> foreign_variables_;
   WasmFunctionBuilder* init_function_;
@@ -1829,6 +1971,9 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
   uint32_t next_table_index_;
   ZoneHashMap function_tables_;
   ImportedFunctionTable imported_function_table_;
+  // Remember the parent node for reporting the correct location for ToNumber
+  // conversions after calls.
+  BinaryOperation* parent_binop_;
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
@@ -1836,22 +1981,24 @@ class AsmWasmBuilderImpl final : public AstVisitor<AsmWasmBuilderImpl> {
   DISALLOW_COPY_AND_ASSIGN(AsmWasmBuilderImpl);
 };
-AsmWasmBuilder::AsmWasmBuilder(Isolate* isolate, Zone* zone,
-                               FunctionLiteral* literal, AsmTyper* typer)
-    : isolate_(isolate), zone_(zone), literal_(literal), typer_(typer) {}
+AsmWasmBuilder::AsmWasmBuilder(CompilationInfo* info)
+    : info_(info),
+      typer_(info->isolate(), info->zone(), info->script(), info->literal()) {}
 // TODO(aseemgarg): probably should take zone (to write wasm to) as input so
 // that zone in constructor may be thrown away once wasm module is written.
-AsmWasmBuilder::Result AsmWasmBuilder::Run(
-    i::Handle<i::FixedArray>* foreign_args) {
-  AsmWasmBuilderImpl impl(isolate_, zone_, literal_, typer_);
-  impl.Build();
+AsmWasmBuilder::Result AsmWasmBuilder::Run(Handle<FixedArray>* foreign_args) {
+  Zone* zone = info_->zone();
+  AsmWasmBuilderImpl impl(info_->isolate(), zone, info_,
+                          info_->parse_info()->ast_value_factory(),
+                          info_->script(), info_->literal(), &typer_);
+  bool success = impl.Build();
   *foreign_args = impl.GetForeignArgs();
-  ZoneBuffer* module_buffer = new (zone_) ZoneBuffer(zone_);
+  ZoneBuffer* module_buffer = new (zone) ZoneBuffer(zone);
   impl.builder_->WriteTo(*module_buffer);
-  ZoneBuffer* asm_offsets_buffer = new (zone_) ZoneBuffer(zone_);
+  ZoneBuffer* asm_offsets_buffer = new (zone) ZoneBuffer(zone);
   impl.builder_->WriteAsmJsOffsetTable(*asm_offsets_buffer);
-  return {module_buffer, asm_offsets_buffer};
+  return {module_buffer, asm_offsets_buffer, success};
 }
 const char* AsmWasmBuilder::foreign_init_name = "__foreign_init__";
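Editor's note: Run() now reports failure through the new success field rather than assuming the build always succeeds. A hypothetical caller sketch (not part of this commit; `info` and the fallback path are assumptions):

    // Hedged sketch: consuming the new success flag.
    AsmWasmBuilder builder(info);          // info: an assumed CompilationInfo*
    Handle<FixedArray> foreign_args;
    AsmWasmBuilder::Result result = builder.Run(&foreign_args);
    if (!result.success) {
      // Typing/validation failed; fall back to plain JavaScript compilation.
    }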

View File

@@ -14,7 +14,7 @@
 namespace v8 {
 namespace internal {
-class FunctionLiteral;
+class CompilationInfo;
 namespace wasm {
@@ -23,20 +23,20 @@ class AsmWasmBuilder {
   struct Result {
     ZoneBuffer* module_bytes;
     ZoneBuffer* asm_offset_table;
+    bool success;
   };
-  explicit AsmWasmBuilder(Isolate* isolate, Zone* zone, FunctionLiteral* root,
-                          AsmTyper* typer);
+  explicit AsmWasmBuilder(CompilationInfo* info);
   Result Run(Handle<FixedArray>* foreign_args);
   static const char* foreign_init_name;
   static const char* single_function_name;
+  const AsmTyper* typer() { return &typer_; }
  private:
-  Isolate* isolate_;
-  Zone* zone_;
-  FunctionLiteral* literal_;
-  AsmTyper* typer_;
+  CompilationInfo* info_;
+  AsmTyper typer_;
 };
 }  // namespace wasm
 }  // namespace internal

32
deps/v8/src/assembler-inl.h vendored Normal file
View File

@@ -0,0 +1,32 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef V8_ASSEMBLER_INL_H_
+#define V8_ASSEMBLER_INL_H_
+#include "src/assembler.h"
+#if V8_TARGET_ARCH_IA32
+#include "src/ia32/assembler-ia32-inl.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/assembler-x64-inl.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/assembler-arm64-inl.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/assembler-arm-inl.h"
+#elif V8_TARGET_ARCH_PPC
+#include "src/ppc/assembler-ppc-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64-inl.h"
+#elif V8_TARGET_ARCH_S390
+#include "src/s390/assembler-s390-inl.h"
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/assembler-x87-inl.h"
+#else
+#error Unknown architecture.
+#endif
+#endif  // V8_ASSEMBLER_INL_H_
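Editor's note: this new umbrella header centralizes the per-architecture dispatch, so translation units no longer repeat the #if V8_TARGET_ARCH_* ladder. A minimal sketch of the intended usage:

    // Any .cc that needs the inline assembler helpers now writes only:
    #include "src/assembler-inl.h"
    // The header itself selects the matching <arch>/assembler-<arch>-inl.h.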

View File

@@ -35,8 +35,11 @@
 #include "src/assembler.h"
 #include <math.h>
+#include <string.h>
 #include <cmath>
 #include "src/api.h"
+#include "src/assembler-inl.h"
 #include "src/base/cpu.h"
 #include "src/base/functional.h"
 #include "src/base/ieee754.h"
@@ -62,28 +65,6 @@
 #include "src/snapshot/serializer-common.h"
 #include "src/wasm/wasm-external-refs.h"
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_PPC
-#include "src/ppc/assembler-ppc-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_MIPS64
-#include "src/mips64/assembler-mips64-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_S390
-#include "src/s390/assembler-s390-inl.h"  // NOLINT
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/assembler-x87-inl.h"  // NOLINT
-#else
-#error "Unknown architecture."
-#endif
 // Include native regexp-macro-assembler.
 #ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
@@ -353,8 +334,7 @@ void RelocInfo::update_wasm_memory_reference(
     uint32_t current_size_reference = wasm_memory_size_reference();
     uint32_t updated_size_reference =
         new_size + (current_size_reference - old_size);
-    unchecked_update_wasm_memory_size(updated_size_reference,
-                                      icache_flush_mode);
+    unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
   } else {
     UNREACHABLE();
   }
@@ -378,6 +358,18 @@ void RelocInfo::update_wasm_global_reference(
   }
 }
+void RelocInfo::update_wasm_function_table_size_reference(
+    uint32_t old_size, uint32_t new_size, ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
+  uint32_t current_size_reference = wasm_function_table_size_reference();
+  uint32_t updated_size_reference =
+      new_size + (current_size_reference - old_size);
+  unchecked_update_wasm_size(updated_size_reference, icache_flush_mode);
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    Assembler::FlushICache(isolate_, pc_, sizeof(int64_t));
+  }
+}
 void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
@@ -782,14 +774,14 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
       return "debug break slot at tail call";
     case CODE_AGE_SEQUENCE:
       return "code age sequence";
-    case GENERATOR_CONTINUATION:
-      return "generator continuation";
     case WASM_MEMORY_REFERENCE:
       return "wasm memory reference";
     case WASM_MEMORY_SIZE_REFERENCE:
       return "wasm memory size reference";
    case WASM_GLOBAL_REFERENCE:
       return "wasm global value reference";
+    case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
+      return "wasm function table size reference";
     case NUMBER_OF_MODES:
     case PC_JUMP:
       UNREACHABLE();
@@ -884,10 +876,10 @@ void RelocInfo::Verify(Isolate* isolate) {
     case DEBUG_BREAK_SLOT_AT_RETURN:
     case DEBUG_BREAK_SLOT_AT_CALL:
     case DEBUG_BREAK_SLOT_AT_TAIL_CALL:
-    case GENERATOR_CONTINUATION:
     case WASM_MEMORY_REFERENCE:
     case WASM_MEMORY_SIZE_REFERENCE:
     case WASM_GLOBAL_REFERENCE:
+    case WASM_FUNCTION_TABLE_SIZE_REFERENCE:
    case NONE32:
     case NONE64:
       break;
@@ -1204,6 +1196,12 @@ ExternalReference ExternalReference::f64_mod_wrapper_function(
   return ExternalReference(Redirect(isolate, FUNCTION_ADDR(f64_mod_wrapper)));
 }
+ExternalReference ExternalReference::wasm_call_trap_callback_for_testing(
+    Isolate* isolate) {
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(wasm::call_trap_callback_for_testing)));
+}
 ExternalReference ExternalReference::log_enter_external_function(
     Isolate* isolate) {
   return ExternalReference(
@@ -1548,6 +1546,14 @@ ExternalReference ExternalReference::ieee754_tanh_function(Isolate* isolate) {
       Redirect(isolate, FUNCTION_ADDR(base::ieee754::tanh), BUILTIN_FP_CALL));
 }
+void* libc_memchr(void* string, int character, size_t search_length) {
+  return memchr(string, character, search_length);
+}
+ExternalReference ExternalReference::libc_memchr_function(Isolate* isolate) {
+  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(libc_memchr)));
+}
 ExternalReference ExternalReference::page_flags(Page* page) {
   return ExternalReference(reinterpret_cast<Address>(page) +
                            MemoryChunk::kFlagsOffset);
@@ -1569,11 +1575,19 @@ ExternalReference ExternalReference::is_tail_call_elimination_enabled_address(
   return ExternalReference(isolate->is_tail_call_elimination_enabled_address());
 }
+ExternalReference ExternalReference::promise_hook_address(Isolate* isolate) {
+  return ExternalReference(isolate->promise_hook_address());
+}
 ExternalReference ExternalReference::debug_is_active_address(
     Isolate* isolate) {
   return ExternalReference(isolate->debug()->is_active_address());
 }
+ExternalReference ExternalReference::debug_hook_on_function_call_address(
+    Isolate* isolate) {
+  return ExternalReference(isolate->debug()->hook_on_function_call_address());
+}
 ExternalReference ExternalReference::debug_after_break_target_address(
     Isolate* isolate) {
@@ -1914,12 +1928,6 @@ void Assembler::RecordComment(const char* msg) {
 }
-void Assembler::RecordGeneratorContinuation() {
-  EnsureSpace ensure_space(this);
-  RecordRelocInfo(RelocInfo::GENERATOR_CONTINUATION);
-}
 void Assembler::RecordDebugBreakSlot(RelocInfo::Mode mode) {
   EnsureSpace ensure_space(this);
   DCHECK(RelocInfo::IsDebugBreakSlot(mode));
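Editor's note: both size-reference updaters use the same relocation arithmetic: the delta between the embedded constant and the old size is preserved across a resize. A worked example of the formula from the diff (values are illustrative):

    uint32_t old_size = 4;
    uint32_t current_size_reference = 6;  // was emitted as old_size + 2
    uint32_t new_size = 10;
    uint32_t updated_size_reference =
        new_size + (current_size_reference - old_size);  // 12 == new_size + 2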

View File

@@ -395,6 +395,7 @@ class RelocInfo {
     WASM_MEMORY_REFERENCE,
     WASM_GLOBAL_REFERENCE,
     WASM_MEMORY_SIZE_REFERENCE,
+    WASM_FUNCTION_TABLE_SIZE_REFERENCE,
     CELL,
     // Everything after runtime_entry (inclusive) is not GC'ed.
@@ -413,9 +414,6 @@ class RelocInfo {
     // Encoded internal reference, used only on MIPS, MIPS64 and PPC.
     INTERNAL_REFERENCE_ENCODED,
-    // Continuation points for a generator yield.
-    GENERATOR_CONTINUATION,
     // Marks constant and veneer pools. Only used on ARM and ARM64.
     // They use a custom noncompact encoding.
     CONST_POOL,
@@ -440,7 +438,7 @@ class RelocInfo {
     FIRST_REAL_RELOC_MODE = CODE_TARGET,
     LAST_REAL_RELOC_MODE = VENEER_POOL,
     LAST_CODE_ENUM = DEBUGGER_STATEMENT,
-    LAST_GCED_ENUM = WASM_MEMORY_SIZE_REFERENCE,
+    LAST_GCED_ENUM = WASM_FUNCTION_TABLE_SIZE_REFERENCE,
     FIRST_SHAREABLE_RELOC_MODE = CELL,
   };
@@ -524,9 +522,6 @@ class RelocInfo {
   static inline bool IsCodeAgeSequence(Mode mode) {
     return mode == CODE_AGE_SEQUENCE;
   }
-  static inline bool IsGeneratorContinuation(Mode mode) {
-    return mode == GENERATOR_CONTINUATION;
-  }
   static inline bool IsWasmMemoryReference(Mode mode) {
     return mode == WASM_MEMORY_REFERENCE;
   }
@@ -536,6 +531,22 @@ class RelocInfo {
   static inline bool IsWasmGlobalReference(Mode mode) {
     return mode == WASM_GLOBAL_REFERENCE;
   }
+  static inline bool IsWasmFunctionTableSizeReference(Mode mode) {
+    return mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+  }
+  static inline bool IsWasmReference(Mode mode) {
+    return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE ||
+           mode == WASM_MEMORY_SIZE_REFERENCE ||
+           mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+  }
+  static inline bool IsWasmSizeReference(Mode mode) {
+    return mode == WASM_MEMORY_SIZE_REFERENCE ||
+           mode == WASM_FUNCTION_TABLE_SIZE_REFERENCE;
+  }
+  static inline bool IsWasmPtrReference(Mode mode) {
+    return mode == WASM_MEMORY_REFERENCE || mode == WASM_GLOBAL_REFERENCE;
+  }
   static inline int ModeMask(Mode mode) { return 1 << mode; }
   // Accessors
@@ -564,6 +575,7 @@ class RelocInfo {
   Address wasm_memory_reference();
   Address wasm_global_reference();
+  uint32_t wasm_function_table_size_reference();
   uint32_t wasm_memory_size_reference();
   void update_wasm_memory_reference(
       Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
@@ -571,6 +583,9 @@ class RelocInfo {
   void update_wasm_global_reference(
       Address old_base, Address new_base,
       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+  void update_wasm_function_table_size_reference(
+      uint32_t old_base, uint32_t new_base,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
   void set_target_address(
       Address target,
       WriteBarrierMode write_barrier_mode = UPDATE_WRITE_BARRIER,
@@ -679,8 +694,7 @@ class RelocInfo {
  private:
   void unchecked_update_wasm_memory_reference(Address address,
                                               ICacheFlushMode flush_mode);
-  void unchecked_update_wasm_memory_size(uint32_t size,
-                                         ICacheFlushMode flush_mode);
+  void unchecked_update_wasm_size(uint32_t size, ICacheFlushMode flush_mode);
   Isolate* isolate_;
   // On ARM, note that pc_ is the address of the constant pool entry
@@ -949,6 +963,10 @@ class ExternalReference BASE_EMBEDDED {
   static ExternalReference f64_asin_wrapper_function(Isolate* isolate);
   static ExternalReference f64_mod_wrapper_function(Isolate* isolate);
+  // Trap callback function for cctest/wasm/wasm-run-utils.h
+  static ExternalReference wasm_call_trap_callback_for_testing(
+      Isolate* isolate);
   // Log support.
   static ExternalReference log_enter_external_function(Isolate* isolate);
   static ExternalReference log_leave_external_function(Isolate* isolate);
@@ -1031,6 +1049,8 @@ class ExternalReference BASE_EMBEDDED {
   static ExternalReference ieee754_tan_function(Isolate* isolate);
   static ExternalReference ieee754_tanh_function(Isolate* isolate);
+  static ExternalReference libc_memchr_function(Isolate* isolate);
   static ExternalReference page_flags(Page* page);
   static ExternalReference ForDeoptEntry(Address entry);
@@ -1041,12 +1061,16 @@ class ExternalReference BASE_EMBEDDED {
       Isolate* isolate);
   static ExternalReference debug_is_active_address(Isolate* isolate);
+  static ExternalReference debug_hook_on_function_call_address(
+      Isolate* isolate);
   static ExternalReference debug_after_break_target_address(Isolate* isolate);
   static ExternalReference is_profiling_address(Isolate* isolate);
   static ExternalReference invoke_function_callback(Isolate* isolate);
   static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
+  static ExternalReference promise_hook_address(Isolate* isolate);
   V8_EXPORT_PRIVATE static ExternalReference runtime_function_table_address(
       Isolate* isolate);
@@ -1117,6 +1141,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, ExternalReference);
 // -----------------------------------------------------------------------------
 // Utility functions
+void* libc_memchr(void* string, int character, size_t search_length);
 inline int NumberOfBitsSet(uint32_t x) {
   unsigned int num_bits_set;
@@ -1144,7 +1169,7 @@ class CallWrapper {
   // Called just after emitting a call, i.e., at the return site for the call.
   virtual void AfterCall() const = 0;
   // Return whether call needs to check for debug stepping.
-  virtual bool NeedsDebugStepCheck() const { return false; }
+  virtual bool NeedsDebugHookCheck() const { return false; }
 };
@@ -1163,7 +1188,7 @@ class CheckDebugStepCallWrapper : public CallWrapper {
   virtual ~CheckDebugStepCallWrapper() {}
   virtual void BeforeCall(int call_size) const {}
   virtual void AfterCall() const {}
-  virtual bool NeedsDebugStepCheck() const { return true; }
+  virtual bool NeedsDebugHookCheck() const { return true; }
 };
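Editor's note: the new predicates partition the wasm reloc modes into pointer-like and size-like operands. A small consistency sketch built only from the definitions above (illustrative, not code from this commit):

    static void CheckWasmPredicateConsistency(RelocInfo::Mode mode) {
      bool is_ptr = RelocInfo::IsWasmPtrReference(mode);    // memory/global
      bool is_size = RelocInfo::IsWasmSizeReference(mode);  // memory/table size
      DCHECK(!(is_ptr && is_size));  // the two kinds are disjoint by definition
      DCHECK_EQ(RelocInfo::IsWasmReference(mode), is_ptr || is_size);
    }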

View File

@@ -83,15 +83,21 @@ PerThreadAssertScope<kType, kAllow>::PerThreadAssertScope()
 template <PerThreadAssertType kType, bool kAllow>
 PerThreadAssertScope<kType, kAllow>::~PerThreadAssertScope() {
+  if (data_ == nullptr) return;
+  Release();
+}
+template <PerThreadAssertType kType, bool kAllow>
+void PerThreadAssertScope<kType, kAllow>::Release() {
   DCHECK_NOT_NULL(data_);
   data_->Set(kType, old_state_);
   if (data_->DecrementLevel()) {
     PerThreadAssertData::SetCurrent(NULL);
     delete data_;
   }
+  data_ = nullptr;
 }
 // static
 template <PerThreadAssertType kType, bool kAllow>
 bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
@@ -149,6 +155,8 @@ template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, false>;
 template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, true>;
 template class PerIsolateAssertScope<COMPILATION_ASSERT, false>;
 template class PerIsolateAssertScope<COMPILATION_ASSERT, true>;
+template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, false>;
+template class PerIsolateAssertScope<NO_EXCEPTION_ASSERT, true>;
 }  // namespace internal
 }  // namespace v8
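Editor's note: Release() lets a scope end before its destructor runs; clearing data_ afterwards makes the destructor a no-op. A usage sketch, assuming the existing DisallowHeapAllocation typedef (illustrative only):

    {
      DisallowHeapAllocation no_gc;
      // ... allocation-free section ...
      no_gc.Release();  // re-enable allocation before the scope closes
      // ... allocation is permitted again; the destructor now does nothing.
    }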

View File

@@ -26,12 +26,12 @@ enum PerThreadAssertType {
   LAST_PER_THREAD_ASSERT_TYPE
 };
 enum PerIsolateAssertType {
   JAVASCRIPT_EXECUTION_ASSERT,
   JAVASCRIPT_EXECUTION_THROWS,
   DEOPTIMIZATION_ASSERT,
-  COMPILATION_ASSERT
+  COMPILATION_ASSERT,
+  NO_EXCEPTION_ASSERT
 };
 template <PerThreadAssertType kType, bool kAllow>
@@ -42,6 +42,8 @@ class PerThreadAssertScope {
   V8_EXPORT_PRIVATE static bool IsAllowed();
+  void Release();
  private:
   PerThreadAssertData* data_;
   bool old_state_;
@@ -76,6 +78,7 @@ class PerThreadAssertScopeDebugOnly : public
 class PerThreadAssertScopeDebugOnly {
  public:
   PerThreadAssertScopeDebugOnly() { }
+  void Release() {}
 #endif
 };
@@ -147,6 +150,14 @@ typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>
 typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>
     AllowJavascriptExecution;
+// Scope to document where we do not expect javascript execution (debug only)
+typedef PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, false>
+    DisallowJavascriptExecutionDebugOnly;
+// Scope to introduce an exception to DisallowJavascriptExecutionDebugOnly.
+typedef PerIsolateAssertScopeDebugOnly<JAVASCRIPT_EXECUTION_ASSERT, true>
+    AllowJavascriptExecutionDebugOnly;
 // Scope in which javascript execution leads to exception being thrown.
 typedef PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>
     ThrowOnJavascriptExecution;
@@ -170,6 +181,14 @@ typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, false>
 // Scope to introduce an exception to DisallowDeoptimization.
 typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, true>
     AllowCompilation;
+// Scope to document where we do not expect exceptions.
+typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, false>
+    DisallowExceptions;
+// Scope to introduce an exception to DisallowExceptions.
+typedef PerIsolateAssertScopeDebugOnly<NO_EXCEPTION_ASSERT, true>
+    AllowExceptions;
 }  // namespace internal
 }  // namespace v8
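Editor's note: a usage sketch for the new debug-only scope (the function is hypothetical; the scope compiles away in release builds):

    void InternalOperation(Isolate* isolate) {
      DisallowExceptions no_exceptions(isolate);
      // ... code that must not leave a pending exception behind ...
    }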

View File

@@ -2,8 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+#include "src/ast/ast.h"
 #include "src/ast/ast-expression-rewriter.h"
-#include "src/ast/ast.h"
+#include "src/objects-inl.h"
 namespace v8 {
 namespace internal {
@@ -372,6 +373,9 @@ void AstExpressionRewriter::VisitEmptyParentheses(EmptyParentheses* node) {
   NOTHING();
 }
+void AstExpressionRewriter::VisitGetIterator(GetIterator* node) {
+  AST_REWRITE_PROPERTY(Expression, node, iterable);
+}
 void AstExpressionRewriter::VisitDoExpression(DoExpression* node) {
   REWRITE_THIS(node);

View File

@@ -0,0 +1,29 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include "src/ast/ast-function-literal-id-reindexer.h"
+#include "src/objects-inl.h"
+#include "src/ast/ast.h"
+namespace v8 {
+namespace internal {
+AstFunctionLiteralIdReindexer::AstFunctionLiteralIdReindexer(size_t stack_limit,
+                                                             int delta)
+    : AstTraversalVisitor(stack_limit), delta_(delta) {}
+AstFunctionLiteralIdReindexer::~AstFunctionLiteralIdReindexer() {}
+void AstFunctionLiteralIdReindexer::Reindex(Expression* pattern) {
+  Visit(pattern);
+}
+void AstFunctionLiteralIdReindexer::VisitFunctionLiteral(FunctionLiteral* lit) {
+  AstTraversalVisitor::VisitFunctionLiteral(lit);
+  lit->set_function_literal_id(lit->function_literal_id() + delta_);
+}
+}  // namespace internal
+}  // namespace v8

View File

@@ -0,0 +1,36 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#ifndef V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
+#define V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
+#include "src/ast/ast-traversal-visitor.h"
+#include "src/base/macros.h"
+namespace v8 {
+namespace internal {
+// Changes the ID of all FunctionLiterals in the given Expression by adding the
+// given delta.
+class AstFunctionLiteralIdReindexer final
+    : public AstTraversalVisitor<AstFunctionLiteralIdReindexer> {
+ public:
+  AstFunctionLiteralIdReindexer(size_t stack_limit, int delta);
+  ~AstFunctionLiteralIdReindexer();
+  void Reindex(Expression* pattern);
+  // AstTraversalVisitor implementation.
+  void VisitFunctionLiteral(FunctionLiteral* lit);
+ private:
+  int delta_;
+  DISALLOW_COPY_AND_ASSIGN(AstFunctionLiteralIdReindexer);
+};
+}  // namespace internal
+}  // namespace v8
+#endif  // V8_AST_AST_FUNCTION_LITERAL_ID_REINDEXER
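Editor's note: a hypothetical usage sketch for the reindexer (the delta value is an assumption):

    // Shift every FunctionLiteral ID inside `expr` by 7, e.g. after splicing
    // a reparsed expression back into an outer function's literal numbering.
    AstFunctionLiteralIdReindexer reindexer(stack_limit, 7);
    reindexer.Reindex(expr);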

View File

@@ -6,6 +6,7 @@
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
+#include "src/objects-inl.h"
 namespace v8 {
 namespace internal {
@@ -186,6 +187,9 @@ void AstLiteralReindexer::VisitSpread(Spread* node) {
 void AstLiteralReindexer::VisitEmptyParentheses(EmptyParentheses* node) {}
+void AstLiteralReindexer::VisitGetIterator(GetIterator* node) {
+  Visit(node->iterable());
+}
 void AstLiteralReindexer::VisitForInStatement(ForInStatement* node) {
   Visit(node->each());

View File

@@ -6,22 +6,26 @@
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
+#include "src/compiler.h"
+#include "src/objects-inl.h"
 namespace v8 {
 namespace internal {
 class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
  public:
-  AstNumberingVisitor(Isolate* isolate, Zone* zone)
-      : isolate_(isolate),
-        zone_(zone),
+  AstNumberingVisitor(uintptr_t stack_limit, Zone* zone,
+                      Compiler::EagerInnerFunctionLiterals* eager_literals)
+      : zone_(zone),
+        eager_literals_(eager_literals),
         next_id_(BailoutId::FirstUsable().ToInt()),
         yield_count_(0),
         properties_(zone),
         slot_cache_(zone),
+        disable_crankshaft_reason_(kNoReason),
         dont_optimize_reason_(kNoReason),
         catch_prediction_(HandlerTable::UNCAUGHT) {
-    InitializeAstVisitor(isolate);
+    InitializeAstVisitor(stack_limit);
   }
   bool Renumber(FunctionLiteral* node);
@@ -55,25 +59,28 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
     dont_optimize_reason_ = reason;
     DisableSelfOptimization();
   }
-  void DisableCrankshaft(BailoutReason reason) {
-    properties_.flags() |= AstProperties::kDontCrankshaft;
+  void DisableFullCodegenAndCrankshaft(BailoutReason reason) {
+    disable_crankshaft_reason_ = reason;
+    properties_.flags() |= AstProperties::kMustUseIgnitionTurbo;
   }
   template <typename Node>
   void ReserveFeedbackSlots(Node* node) {
-    node->AssignFeedbackVectorSlots(isolate_, properties_.get_spec(),
-                                    &slot_cache_);
+    node->AssignFeedbackVectorSlots(properties_.get_spec(), &slot_cache_);
   }
   BailoutReason dont_optimize_reason() const { return dont_optimize_reason_; }
+  Zone* zone() const { return zone_; }
-  Isolate* isolate_;
   Zone* zone_;
+  Compiler::EagerInnerFunctionLiterals* eager_literals_;
   int next_id_;
   int yield_count_;
   AstProperties properties_;
   // The slot cache allows us to reuse certain feedback vector slots.
   FeedbackVectorSlotCache slot_cache_;
+  BailoutReason disable_crankshaft_reason_;
   BailoutReason dont_optimize_reason_;
   HandlerTable::CatchPrediction catch_prediction_;
@@ -122,6 +129,7 @@ void AstNumberingVisitor::VisitNativeFunctionLiteral(
   IncrementNodeCount();
   DisableOptimization(kNativeFunctionLiteral);
   node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
+  ReserveFeedbackSlots(node);
 }
@@ -149,10 +157,11 @@ void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
   IncrementNodeCount();
   switch (node->var()->location()) {
     case VariableLocation::LOOKUP:
-      DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
+      DisableFullCodegenAndCrankshaft(
+          kReferenceToAVariableWhichRequiresDynamicLookup);
       break;
     case VariableLocation::MODULE:
-      DisableCrankshaft(kReferenceToModuleVariable);
+      DisableFullCodegenAndCrankshaft(kReferenceToModuleVariable);
       break;
     default:
       break;
@@ -176,7 +185,7 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
 void AstNumberingVisitor::VisitSuperPropertyReference(
     SuperPropertyReference* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kSuperReference);
+  DisableFullCodegenAndCrankshaft(kSuperReference);
   node->set_base_id(ReserveIdRange(SuperPropertyReference::num_ids()));
   Visit(node->this_var());
   Visit(node->home_object());
@@ -185,7 +194,7 @@ void AstNumberingVisitor::VisitSuperPropertyReference(
 void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kSuperReference);
+  DisableFullCodegenAndCrankshaft(kSuperReference);
   node->set_base_id(ReserveIdRange(SuperCallReference::num_ids()));
   Visit(node->this_var());
   Visit(node->new_target_var());
@@ -282,8 +291,7 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
 void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kWithStatement);
-  node->set_base_id(ReserveIdRange(WithStatement::num_ids()));
+  DisableFullCodegenAndCrankshaft(kWithStatement);
   Visit(node->expression());
   Visit(node->statement());
 }
@@ -313,7 +321,7 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
 void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kTryCatchStatement);
+  DisableFullCodegenAndCrankshaft(kTryCatchStatement);
   {
     const HandlerTable::CatchPrediction old_prediction = catch_prediction_;
     // This node uses its own prediction, unless it's "uncaught", in which case
@@ -332,7 +340,7 @@ void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
 void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kTryFinallyStatement);
+  DisableFullCodegenAndCrankshaft(kTryFinallyStatement);
   // We can't know whether the finally block will override ("catch") an
   // exception thrown in the try block, so we just adopt the outer prediction.
   node->set_catch_prediction(catch_prediction_);
@@ -393,14 +401,25 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
   ReserveFeedbackSlots(node);
 }
-void AstNumberingVisitor::VisitSpread(Spread* node) { UNREACHABLE(); }
+void AstNumberingVisitor::VisitSpread(Spread* node) {
+  IncrementNodeCount();
+  // We can only get here from super calls currently.
+  DisableFullCodegenAndCrankshaft(kSuperReference);
+  node->set_base_id(ReserveIdRange(Spread::num_ids()));
+  Visit(node->expression());
+}
 void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
   UNREACHABLE();
 }
+void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
+  IncrementNodeCount();
+  DisableFullCodegenAndCrankshaft(kGetIterator);
+  node->set_base_id(ReserveIdRange(GetIterator::num_ids()));
+  Visit(node->iterable());
+  ReserveFeedbackSlots(node);
+}
 void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
   IncrementNodeCount();
@@ -417,7 +436,7 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
 void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kForOfStatement);
+  DisableFullCodegenAndCrankshaft(kForOfStatement);
   node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
   Visit(node->assign_iterator());  // Not part of loop.
   node->set_first_yield_id(yield_count_);
@@ -484,8 +503,8 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
 void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
   IncrementNodeCount();
-  DisableCrankshaft(kClassLiteral);
-  node->set_base_id(ReserveIdRange(node->num_ids()));
+  DisableFullCodegenAndCrankshaft(kClassLiteral);
+  node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
   if (node->extends()) Visit(node->extends());
   if (node->constructor()) Visit(node->constructor());
   if (node->class_variable_proxy()) {
@@ -504,7 +523,7 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
   for (int i = 0; i < node->properties()->length(); i++) {
     VisitLiteralProperty(node->properties()->at(i));
   }
-  node->BuildConstantProperties(isolate_);
+  node->InitDepthAndFlags();
   // Mark all computed expressions that are bound to a key that
   // is shadowed by a later occurrence of the same key. For the
   // marked expressions, no store code will be is emitted.
@@ -513,7 +532,8 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
 }
 void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
-  if (node->is_computed_name()) DisableCrankshaft(kComputedPropertyName);
+  if (node->is_computed_name())
+    DisableFullCodegenAndCrankshaft(kComputedPropertyName);
   Visit(node->key());
   Visit(node->value());
 }
@@ -524,12 +544,15 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
   for (int i = 0; i < node->values()->length(); i++) {
     Visit(node->values()->at(i));
   }
-  node->BuildConstantElements(isolate_);
+  node->InitDepthAndFlags();
   ReserveFeedbackSlots(node);
 }
 void AstNumberingVisitor::VisitCall(Call* node) {
+  if (node->is_possibly_eval()) {
+    DisableFullCodegenAndCrankshaft(kFunctionCallsEval);
+  }
   IncrementNodeCount();
   ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(Call::num_ids()));
@@ -569,8 +592,13 @@ void AstNumberingVisitor::VisitArguments(ZoneList<Expression*>* arguments) {
 void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
   IncrementNodeCount();
   node->set_base_id(ReserveIdRange(FunctionLiteral::num_ids()));
+  if (eager_literals_ && node->ShouldEagerCompile()) {
+    eager_literals_->Add(new (zone())
+                             ThreadedListZoneEntry<FunctionLiteral*>(node));
+  }
   // We don't recurse into the declarations or body of the function literal:
   // you have to separately Renumber() each FunctionLiteral that you compile.
+  ReserveFeedbackSlots(node);
 }
@@ -584,22 +612,26 @@ void AstNumberingVisitor::VisitRewritableExpression(
 bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
   DeclarationScope* scope = node->scope();
-  if (scope->new_target_var()) DisableCrankshaft(kSuperReference);
-  if (scope->calls_eval()) DisableCrankshaft(kFunctionCallsEval);
-  if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
-    DisableCrankshaft(kContextAllocatedArguments);
+  if (scope->new_target_var() != nullptr ||
+      scope->this_function_var() != nullptr) {
+    DisableFullCodegenAndCrankshaft(kSuperReference);
+  }
+  if (scope->arguments() != nullptr &&
+      !scope->arguments()->IsStackAllocated()) {
+    DisableFullCodegenAndCrankshaft(kContextAllocatedArguments);
   }
   if (scope->rest_parameter() != nullptr) {
-    DisableCrankshaft(kRestParameter);
+    DisableFullCodegenAndCrankshaft(kRestParameter);
   }
-  if (IsGeneratorFunction(node->kind()) || IsAsyncFunction(node->kind())) {
-    DisableCrankshaft(kGenerator);
+  if (IsResumableFunction(node->kind())) {
+    DisableFullCodegenAndCrankshaft(kGenerator);
   }
   if (IsClassConstructor(node->kind())) {
-    DisableCrankshaft(kClassConstructorFunction);
+    DisableFullCodegenAndCrankshaft(kClassConstructorFunction);
   }
   VisitDeclarations(scope->declarations());
@@ -608,13 +640,26 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
   node->set_ast_properties(&properties_);
   node->set_dont_optimize_reason(dont_optimize_reason());
   node->set_yield_count(yield_count_);
+  if (FLAG_trace_opt) {
+    if (disable_crankshaft_reason_ != kNoReason) {
+      PrintF("[enforcing Ignition and TurboFan for %s because: %s\n",
+             node->debug_name()->ToCString().get(),
+             GetBailoutReason(disable_crankshaft_reason_));
+    }
+  }
   return !HasStackOverflow();
 }
-bool AstNumbering::Renumber(Isolate* isolate, Zone* zone,
-                            FunctionLiteral* function) {
-  AstNumberingVisitor visitor(isolate, zone);
+bool AstNumbering::Renumber(
+    uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
+    Compiler::EagerInnerFunctionLiterals* eager_literals) {
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
+  AstNumberingVisitor visitor(stack_limit, zone, eager_literals);
   return visitor.Renumber(function);
 }
 }  // namespace internal
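Editor's note: a hypothetical caller sketch for the new Renumber() signature; the stack-limit source and error handling here are assumptions, not part of this commit:

    Compiler::EagerInnerFunctionLiterals eager_literals;
    bool ok = AstNumbering::Renumber(isolate->stack_guard()->real_climit(),
                                     zone, literal, &eager_literals);
    if (!ok) return false;  // stack overflow while walking the AST
    // eager_literals now lists inner functions that should compile eagerly.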

View File

@@ -5,6 +5,8 @@
 #ifndef V8_AST_AST_NUMBERING_H_
 #define V8_AST_AST_NUMBERING_H_
+#include <stdint.h>
 namespace v8 {
 namespace internal {
@@ -12,11 +14,20 @@ namespace internal {
 class FunctionLiteral;
 class Isolate;
 class Zone;
+template <typename T>
+class ThreadedList;
+template <typename T>
+class ThreadedListZoneEntry;
+template <typename T>
+class ZoneVector;
 namespace AstNumbering {
 // Assign type feedback IDs, bailout IDs, and generator yield IDs to an AST node
-// tree; perform catch prediction for TryStatements.
-bool Renumber(Isolate* isolate, Zone* zone, FunctionLiteral* function);
+// tree; perform catch prediction for TryStatements. If |eager_literals| is
+// non-null, adds any eager inner literal functions into it.
+bool Renumber(
+    uintptr_t stack_limit, Zone* zone, FunctionLiteral* function,
+    ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* eager_literals);
 }
 // Some details on yield IDs

View File

@@ -288,7 +288,7 @@ void AstTraversalVisitor<Subclass>::VisitFunctionLiteral(
   DeclarationScope* scope = expr->scope();
   RECURSE_EXPRESSION(VisitDeclarations(scope->declarations()));
   // A lazily parsed function literal won't have a body.
-  if (expr->scope()->is_lazily_parsed()) return;
+  if (expr->scope()->was_lazily_parsed()) return;
   RECURSE_EXPRESSION(VisitStatements(expr->body()));
 }
@@ -470,6 +470,12 @@ void AstTraversalVisitor<Subclass>::VisitEmptyParentheses(
   PROCESS_EXPRESSION(expr);
 }
+template <class Subclass>
+void AstTraversalVisitor<Subclass>::VisitGetIterator(GetIterator* expr) {
+  PROCESS_EXPRESSION(expr);
+  RECURSE_EXPRESSION(Visit(expr->iterable()));
+}
 template <class Subclass>
 void AstTraversalVisitor<Subclass>::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {

View File

@@ -7,6 +7,7 @@
 #include "src/ast/ast-types.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 #include "src/ostreams.h"
 namespace v8 {
@@ -209,7 +210,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
     case JS_MODULE_NAMESPACE_TYPE:
-    case JS_FIXED_ARRAY_ITERATOR_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
     case JS_REGEXP_TYPE:  // TODO(rossberg): there should be a RegExp type.
@@ -259,6 +259,7 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
+    case JS_PROMISE_CAPABILITY_TYPE:
     case JS_PROMISE_TYPE:
     case JS_BOUND_FUNCTION_TYPE:
       DCHECK(!map->is_undetectable());
@@ -304,8 +305,6 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
     case PROMISE_REACTION_JOB_INFO_TYPE:
     case FUNCTION_TEMPLATE_INFO_TYPE:
    case OBJECT_TEMPLATE_INFO_TYPE:
-    case SIGNATURE_INFO_TYPE:
-    case TYPE_SWITCH_INFO_TYPE:
     case ALLOCATION_MEMENTO_TYPE:
     case TYPE_FEEDBACK_INFO_TYPE:
     case ALIASED_ARGUMENTS_ENTRY_TYPE:
@@ -315,8 +314,10 @@ AstType::bitset AstBitsetType::Lub(i::Map* map) {
     case CELL_TYPE:
     case WEAK_CELL_TYPE:
     case PROTOTYPE_INFO_TYPE:
+    case TUPLE2_TYPE:
     case TUPLE3_TYPE:
     case CONTEXT_EXTENSION_TYPE:
+    case CONSTANT_ELEMENTS_PAIR_TYPE:
       UNREACHABLE();
       return kNone;
   }


@@ -28,6 +28,8 @@
 #include "src/ast/ast-value-factory.h"
 #include "src/api.h"
+#include "src/char-predicates-inl.h"
+#include "src/objects-inl.h"
 #include "src/objects.h"
 #include "src/utils.h"
@@ -219,9 +221,17 @@ void AstValue::Internalize(Isolate* isolate) {
   }
 }
 AstRawString* AstValueFactory::GetOneByteStringInternal(
     Vector<const uint8_t> literal) {
+  if (literal.length() == 1 && IsInRange(literal[0], 'a', 'z')) {
+    int key = literal[0] - 'a';
+    if (one_character_strings_[key] == nullptr) {
+      uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
+          literal.start(), literal.length(), hash_seed_);
+      one_character_strings_[key] = GetString(hash, true, literal);
+    }
+    return one_character_strings_[key];
+  }
   uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
       literal.start(), literal.length(), hash_seed_);
   return GetString(hash, true, literal);
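The new branch in GetOneByteStringInternal is a 26-entry memo table: lowercase single-character names dominate minified JavaScript, so each one is hashed and interned once and afterwards returned without touching the string table. A self-contained sketch of the same caching shape, with std::string standing in for AstRawString and Intern() for the hash-and-insert slow path:

    #include <array>
    #include <cassert>
    #include <cstddef>
    #include <deque>
    #include <string>

    // Cache the 26 lowercase single-character strings so repeated lookups
    // (ubiquitous in minified JS) skip hashing and table lookup entirely.
    class StringCache {
     public:
      const std::string* GetOneByteString(const char* data, std::size_t length) {
        if (length == 1 && data[0] >= 'a' && data[0] <= 'z') {
          int key = data[0] - 'a';
          if (one_character_strings_[key] == nullptr)  // first use: fill the slot
            one_character_strings_[key] = Intern(data, length);
          return one_character_strings_[key];          // later uses: no hashing
        }
        return Intern(data, length);                   // general slow path
      }

     private:
      const std::string* Intern(const char* data, std::size_t length) {
        interned_.emplace_back(data, length);  // stand-in for hash + table insert
        return &interned_.back();
      }
      std::array<const std::string*, 26> one_character_strings_{};
      std::deque<std::string> interned_;  // deque keeps element addresses stable
    };

    int main() {
      StringCache cache;
      const std::string* a1 = cache.GetOneByteString("a", 1);
      const std::string* a2 = cache.GetOneByteString("a", 1);
      assert(a1 == a2);  // second lookup hits the cache: same object
      return 0;
    }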
@@ -260,39 +270,6 @@
   return new_string;
 }
-const AstRawString* AstValueFactory::ConcatStrings(const AstRawString* left,
-                                                   const AstRawString* right) {
-  int left_length = left->length();
-  int right_length = right->length();
-  const unsigned char* left_data = left->raw_data();
-  const unsigned char* right_data = right->raw_data();
-  if (left->is_one_byte() && right->is_one_byte()) {
-    uint8_t* buffer = zone_->NewArray<uint8_t>(left_length + right_length);
-    memcpy(buffer, left_data, left_length);
-    memcpy(buffer + left_length, right_data, right_length);
-    Vector<const uint8_t> literal(buffer, left_length + right_length);
-    return GetOneByteStringInternal(literal);
-  } else {
-    uint16_t* buffer = zone_->NewArray<uint16_t>(left_length + right_length);
-    if (left->is_one_byte()) {
-      for (int i = 0; i < left_length; ++i) {
-        buffer[i] = left_data[i];
-      }
-    } else {
-      memcpy(buffer, left_data, 2 * left_length);
-    }
-    if (right->is_one_byte()) {
-      for (int i = 0; i < right_length; ++i) {
-        buffer[i + left_length] = right_data[i];
-      }
-    } else {
-      memcpy(buffer + left_length, right_data, 2 * right_length);
-    }
-    Vector<const uint16_t> literal(buffer, left_length + right_length);
-    return GetTwoByteStringInternal(literal);
-  }
-}
 void AstValueFactory::Internalize(Isolate* isolate) {
   // Strings need to be internalized before values, because values refer to
   // strings.


@@ -30,6 +30,7 @@
 #include "src/api.h"
 #include "src/base/hashmap.h"
+#include "src/conversions.h"
 #include "src/globals.h"
 #include "src/utils.h"
@@ -110,8 +111,9 @@ class AstRawString final : public AstString {
   }
  private:
-  friend class AstValueFactory;
   friend class AstRawStringInternalizationKey;
+  friend class AstStringConstants;
+  friend class AstValueFactory;
   AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
                uint32_t hash)
@@ -158,10 +160,7 @@ class AstValue : public ZoneObject {
     return type_ == STRING;
   }
-  bool IsNumber() const {
-    return type_ == NUMBER || type_ == NUMBER_WITH_DOT || type_ == SMI ||
-           type_ == SMI_WITH_DOT;
-  }
+  bool IsNumber() const { return IsSmi() || IsHeapNumber(); }
   bool ContainsDot() const {
     return type_ == NUMBER_WITH_DOT || type_ == SMI_WITH_DOT;
@@ -173,19 +172,30 @@
   }
   double AsNumber() const {
-    if (type_ == NUMBER || type_ == NUMBER_WITH_DOT)
-      return number_;
-    if (type_ == SMI || type_ == SMI_WITH_DOT)
-      return smi_;
+    if (IsHeapNumber()) return number_;
+    if (IsSmi()) return smi_;
     UNREACHABLE();
     return 0;
   }
   Smi* AsSmi() const {
-    CHECK(type_ == SMI || type_ == SMI_WITH_DOT);
+    CHECK(IsSmi());
     return Smi::FromInt(smi_);
   }
+  bool ToUint32(uint32_t* value) const {
+    if (IsSmi()) {
+      int num = smi_;
+      if (num < 0) return false;
+      *value = static_cast<uint32_t>(num);
+      return true;
+    }
+    if (IsHeapNumber()) {
+      return DoubleToUint32IfEqualToSelf(number_, value);
+    }
+    return false;
+  }
   bool EqualsString(const AstRawString* string) const {
     return type_ == STRING && string_ == string;
   }
@@ -195,6 +205,9 @@ class AstValue : public ZoneObject {
   bool BooleanValue() const;
   bool IsSmi() const { return type_ == SMI || type_ == SMI_WITH_DOT; }
+  bool IsHeapNumber() const {
+    return type_ == NUMBER || type_ == NUMBER_WITH_DOT;
+  }
   bool IsFalse() const { return type_ == BOOLEAN && !bool_; }
   bool IsTrue() const { return type_ == BOOLEAN && bool_; }
   bool IsUndefined() const { return type_ == UNDEFINED; }
@@ -280,7 +293,6 @@ class AstValue : public ZoneObject {
   };
 };
-
 // For generating constants.
 #define STRING_CONSTANTS(F)                     \
   F(anonymous_function, "(anonymous function)") \
@@ -291,7 +303,6 @@ class AstValue : public ZoneObject {
   F(default, "default")                         \
   F(done, "done")                               \
   F(dot, ".")                                   \
-  F(dot_class_field_init, ".class-field-init")  \
   F(dot_for, ".for")                            \
   F(dot_generator_object, ".generator_object")  \
   F(dot_iterator, ".iterator")                  \
@@ -304,6 +315,7 @@ class AstValue : public ZoneObject {
   F(get_space, "get ")                          \
   F(length, "length")                           \
   F(let, "let")                                 \
+  F(name, "name")                               \
   F(native, "native")                           \
   F(new_target, ".new.target")                  \
   F(next, "next")                               \
@@ -320,6 +332,45 @@ class AstValue : public ZoneObject {
   F(use_strict, "use strict")                   \
   F(value, "value")
+class AstStringConstants final {
+ public:
+  AstStringConstants(Isolate* isolate, uint32_t hash_seed)
+      : zone_(isolate->allocator(), ZONE_NAME), hash_seed_(hash_seed) {
+    DCHECK(ThreadId::Current().Equals(isolate->thread_id()));
+#define F(name, str)                                                      \
+  {                                                                       \
+    const char* data = str;                                               \
+    Vector<const uint8_t> literal(reinterpret_cast<const uint8_t*>(data), \
+                                  static_cast<int>(strlen(data)));        \
+    uint32_t hash = StringHasher::HashSequentialString<uint8_t>(          \
+        literal.start(), literal.length(), hash_seed_);                   \
+    name##_string_ = new (&zone_) AstRawString(true, literal, hash);      \
+    /* The Handle returned by the factory is located on the roots */      \
+    /* array, not on the temporary HandleScope, so this is safe.  */      \
+    name##_string_->set_string(isolate->factory()->name##_string());      \
+  }
+    STRING_CONSTANTS(F)
+#undef F
+  }
+
+#define F(name, str) \
+  AstRawString* name##_string() { return name##_string_; }
+  STRING_CONSTANTS(F)
+#undef F
+
+  uint32_t hash_seed() const { return hash_seed_; }
+
+ private:
+  Zone zone_;
+  uint32_t hash_seed_;
+
+#define F(name, str) AstRawString* name##_string_;
+  STRING_CONSTANTS(F)
+#undef F
+
+  DISALLOW_COPY_AND_ASSIGN(AstStringConstants);
+};
+
 #define OTHER_CONSTANTS(F) \
   F(true_value)            \
   F(false_value)           \
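The point of AstStringConstants is construct-once sharing: the table above is built a single time per Isolate, hashing each constant once, and every AstValueFactory created afterwards simply points at it, as the DCHECK_EQ on the hash seed below enforces. In miniature, with std::string standing in for AstRawString and all names invented:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <unordered_set>

    // Built once (per isolate, in V8's case): every constant is interned here.
    struct StringConstants {
      explicit StringConstants(std::uint32_t seed) : hash_seed(seed) {
        dot = Intern(".");
        let = Intern("let");
      }
      const std::string* Intern(const char* s) {
        return &*pool.emplace(s).first;  // node-based set: stable addresses
      }
      std::uint32_t hash_seed;
      std::unordered_set<std::string> pool;
      const std::string* dot;
      const std::string* let;
    };

    // Created per parse: borrows the shared table instead of rebuilding it.
    class ValueFactory {
     public:
      ValueFactory(const StringConstants* constants, std::uint32_t hash_seed)
          : constants_(constants) {
        assert(hash_seed == constants->hash_seed);  // mirrors the DCHECK_EQ
      }
      const std::string* dot_string() const { return constants_->dot; }

     private:
      const StringConstants* constants_;
    };

    int main() {
      StringConstants shared(/*seed=*/1234);
      ValueFactory parse1(&shared, 1234);
      ValueFactory parse2(&shared, 1234);
      assert(parse1.dot_string() == parse2.dot_string());  // one shared object
    }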
@@ -329,21 +380,24 @@ class AstValue : public ZoneObject {
 class AstValueFactory {
  public:
-  AstValueFactory(Zone* zone, uint32_t hash_seed)
+  AstValueFactory(Zone* zone, AstStringConstants* string_constants,
+                  uint32_t hash_seed)
       : string_table_(AstRawStringCompare),
         values_(nullptr),
-        smis_(),
         strings_(nullptr),
         strings_end_(&strings_),
+        string_constants_(string_constants),
         zone_(zone),
         hash_seed_(hash_seed) {
-#define F(name, str) name##_string_ = NULL;
-    STRING_CONSTANTS(F)
-#undef F
-#define F(name) name##_ = NULL;
+#define F(name) name##_ = nullptr;
     OTHER_CONSTANTS(F)
 #undef F
+    DCHECK_EQ(hash_seed, string_constants->hash_seed());
     std::fill(smis_, smis_ + arraysize(smis_), nullptr);
+    std::fill(one_character_strings_,
+              one_character_strings_ + arraysize(one_character_strings_),
+              nullptr);
+    InitializeStringConstants();
   }
   Zone* zone() const { return zone_; }
@@ -361,20 +415,12 @@
   const AstRawString* GetString(Handle<String> literal);
   const AstConsString* NewConsString(const AstString* left,
                                      const AstString* right);
-  const AstRawString* ConcatStrings(const AstRawString* left,
-                                    const AstRawString* right);
   void Internalize(Isolate* isolate);
-#define F(name, str)                                                    \
-  const AstRawString* name##_string() {                                 \
-    if (name##_string_ == NULL) {                                       \
-      const char* data = str;                                           \
-      name##_string_ = GetOneByteString(                                \
-          Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), \
-                                static_cast<int>(strlen(data))));       \
-    }                                                                   \
-    return name##_string_;                                              \
-  }
+#define F(name, str)                           \
+  const AstRawString* name##_string() {        \
+    return string_constants_->name##_string(); \
+  }
   STRING_CONSTANTS(F)
 #undef F
@@ -415,6 +461,17 @@
   AstRawString* GetString(uint32_t hash, bool is_one_byte,
                           Vector<const byte> literal_bytes);
+  void InitializeStringConstants() {
+#define F(name, str)                                                     \
+  AstRawString* raw_string_##name = string_constants_->name##_string();  \
+  base::HashMap::Entry* entry_##name = string_table_.LookupOrInsert(     \
+      raw_string_##name, raw_string_##name->hash());                     \
+  DCHECK(entry_##name->value == nullptr);                                \
+  entry_##name->value = reinterpret_cast<void*>(1);
+    STRING_CONSTANTS(F)
+#undef F
+  }
+
   static bool AstRawStringCompare(void* a, void* b);
   // All strings are copied here, one after another (no NULLs inbetween).
@@ -423,19 +480,23 @@
   // they can be internalized later).
   AstValue* values_;
-  AstValue* smis_[kMaxCachedSmi + 1];
   // We need to keep track of strings_ in order since cons strings require their
   // members to be internalized first.
   AstString* strings_;
   AstString** strings_end_;
+  // Holds constant string values which are shared across the isolate.
+  AstStringConstants* string_constants_;
+
+  // Caches for faster access: small numbers, one character lowercase strings
+  // (for minified code).
+  AstValue* smis_[kMaxCachedSmi + 1];
+  AstRawString* one_character_strings_[26];
+
   Zone* zone_;
   uint32_t hash_seed_;
-#define F(name, str) const AstRawString* name##_string_;
-  STRING_CONSTANTS(F)
-#undef F
 #define F(name) AstValue* name##_;
   OTHER_CONSTANTS(F)
 #undef F

226  deps/v8/src/ast/ast.cc vendored

@@ -10,6 +10,7 @@
 #include "src/ast/prettyprinter.h"
 #include "src/ast/scopes.h"
 #include "src/base/hashmap.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/builtins/builtins.h"
 #include "src/code-stubs.h"
 #include "src/contexts.h"
@@ -28,6 +29,8 @@ namespace internal {
 #ifdef DEBUG
+void AstNode::Print() { Print(Isolate::Current()); }
+
 void AstNode::Print(Isolate* isolate) {
   AstPrinter::PrintOut(isolate, this);
 }
@@ -70,6 +73,10 @@ bool Expression::IsSmiLiteral() const {
   return IsLiteral() && AsLiteral()->raw_value()->IsSmi();
 }
+bool Expression::IsNumberLiteral() const {
+  return IsLiteral() && AsLiteral()->raw_value()->IsNumber();
+}
+
 bool Expression::IsStringLiteral() const {
   return IsLiteral() && AsLiteral()->raw_value()->IsString();
 }
@@ -197,9 +204,7 @@ void VariableProxy::BindTo(Variable* var) {
   var->set_is_used();
 }
-void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
-                                              FeedbackVectorSpec* spec,
+void VariableProxy::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                               FeedbackVectorSlotCache* cache) {
   if (UsesVariableFeedbackSlot()) {
     // VariableProxies that point to the same Variable within a function can
@@ -211,7 +216,7 @@ void VariableProxy::AssignFeedbackVectorSlots(Isolate* isolate,
           static_cast<int>(reinterpret_cast<intptr_t>(entry->value)));
       return;
     }
-    variable_feedback_slot_ = spec->AddLoadGlobalICSlot(var()->name());
+    variable_feedback_slot_ = spec->AddLoadGlobalICSlot();
    cache->Put(var(), variable_feedback_slot_);
   } else {
     variable_feedback_slot_ = spec->AddLoadICSlot();
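This hunk is the template for a refactor repeated through the rest of the file: every AssignFeedbackVectorSlots overload loses its unused Isolate* parameter (and AddLoadGlobalICSlot its name argument), because laying out slots is just appending to a spec. A toy spec showing the append-and-return-index contract these calls assume (names simplified; the real spec returns FeedbackVectorSlot objects, not ints):

    #include <cassert>
    #include <vector>

    enum class SlotKind { kLoadIC, kLoadGlobalIC, kStoreIC, kCallIC, kGeneral };

    // Toy spec: each Add* appends a slot of the given kind and returns its
    // index. Nothing here needs an Isolate, which is why the parameter went.
    class ToyFeedbackVectorSpec {
     public:
      int AddLoadICSlot() { return Add(SlotKind::kLoadIC); }
      int AddLoadGlobalICSlot() { return Add(SlotKind::kLoadGlobalIC); }
      int AddStoreICSlot() { return Add(SlotKind::kStoreIC); }
      int AddCallICSlot() { return Add(SlotKind::kCallIC); }
      int AddGeneralSlot() { return Add(SlotKind::kGeneral); }
      SlotKind GetKind(int slot) const { return kinds_[slot]; }
      int slot_count() const { return static_cast<int>(kinds_.size()); }

     private:
      int Add(SlotKind kind) {
        kinds_.push_back(kind);
        return static_cast<int>(kinds_.size()) - 1;
      }
      std::vector<SlotKind> kinds_;
    };

    int main() {
      ToyFeedbackVectorSpec spec;
      int global_slot = spec.AddLoadGlobalICSlot();  // e.g. a global VariableProxy
      int call_slot = spec.AddCallICSlot();          // e.g. a Call expression
      assert(global_slot == 0 && call_slot == 1 && spec.slot_count() == 2);
      assert(spec.GetKind(call_slot) == SlotKind::kCallIC);
    }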
@@ -235,8 +240,7 @@ static void AssignVectorSlots(Expression* expr, FeedbackVectorSpec* spec,
   }
 }
-void ForInStatement::AssignFeedbackVectorSlots(Isolate* isolate,
-                                               FeedbackVectorSpec* spec,
+void ForInStatement::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                                FeedbackVectorSlotCache* cache) {
   AssignVectorSlots(each(), spec, &each_slot_);
   for_in_feedback_slot_ = spec->AddGeneralSlot();
@@ -253,15 +257,12 @@ Assignment::Assignment(Token::Value op, Expression* target, Expression* value,
       StoreModeField::encode(STANDARD_STORE) | TokenField::encode(op);
 }
-void Assignment::AssignFeedbackVectorSlots(Isolate* isolate,
-                                           FeedbackVectorSpec* spec,
+void Assignment::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                            FeedbackVectorSlotCache* cache) {
   AssignVectorSlots(target(), spec, &slot_);
 }
-void CountOperation::AssignFeedbackVectorSlots(Isolate* isolate,
-                                               FeedbackVectorSpec* spec,
+void CountOperation::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                                FeedbackVectorSlotCache* cache) {
   AssignVectorSlots(expression(), spec, &slot_);
   // Assign a slot to collect feedback about binary operations. Used only in
@@ -346,6 +347,16 @@ ObjectLiteralProperty::ObjectLiteralProperty(AstValueFactory* ast_value_factory,
   }
 }
+FeedbackVectorSlot LiteralProperty::GetStoreDataPropertySlot() const {
+  int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
+  return GetSlot(offset);
+}
+
+void LiteralProperty::SetStoreDataPropertySlot(FeedbackVectorSlot slot) {
+  int offset = FunctionLiteral::NeedsHomeObject(value_) ? 1 : 0;
+  return SetSlot(slot, offset);
+}
+
 bool LiteralProperty::NeedsSetFunctionName() const {
   return is_computed_name_ &&
          (value_->IsAnonymousFunctionDefinition() ||
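The two new accessors encode a small offset convention: a property owns up to two slots, and when the value needs a home object the home-object store occupies offset 0, pushing the data-property slot to offset 1. A sketch of just that convention, with plain ints in place of FeedbackVectorSlot and an invented flag in place of FunctionLiteral::NeedsHomeObject:

    #include <cassert>

    // A literal property owns up to two feedback slots. When the value needs a
    // home object, the home-object store takes offset 0 and the data-property
    // slot moves to offset 1; otherwise the data-property slot sits at offset 0.
    struct PropertySlots {
      explicit PropertySlots(bool needs_home_object)
          : needs_home_object(needs_home_object) {}

      int GetSlot(int offset = 0) const { return slots[offset]; }
      void SetSlot(int slot, int offset = 0) { slots[offset] = slot; }

      int GetStoreDataPropertySlot() const {
        return GetSlot(needs_home_object ? 1 : 0);
      }
      void SetStoreDataPropertySlot(int slot) {
        SetSlot(slot, needs_home_object ? 1 : 0);
      }

      bool needs_home_object;
      int slots[2] = {-1, -1};
    };

    int main() {
      PropertySlots with_home(true);
      PropertySlots without_home(false);
      with_home.SetSlot(7);                      // home-object store, offset 0
      with_home.SetStoreDataPropertySlot(8);     // shifted to offset 1
      without_home.SetStoreDataPropertySlot(9);  // no shift: offset 0
      assert(with_home.GetStoreDataPropertySlot() == 8);
      assert(without_home.GetStoreDataPropertySlot() == 9);
    }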
@@ -360,12 +371,14 @@ ClassLiteralProperty::ClassLiteralProperty(Expression* key, Expression* value,
       kind_(kind),
       is_static_(is_static) {}
-void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
-                                             FeedbackVectorSpec* spec,
+void ClassLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                              FeedbackVectorSlotCache* cache) {
   // This logic that computes the number of slots needed for vector store
-  // ICs must mirror FullCodeGenerator::VisitClassLiteral.
-  prototype_slot_ = spec->AddLoadICSlot();
+  // ICs must mirror BytecodeGenerator::VisitClassLiteral.
+  if (FunctionLiteral::NeedsHomeObject(constructor())) {
+    home_object_slot_ = spec->AddStoreICSlot();
+  }
   if (NeedsProxySlot()) {
     proxy_slot_ = spec->AddStoreICSlot();
   }
@@ -376,6 +389,8 @@ void ClassLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
     if (FunctionLiteral::NeedsHomeObject(value)) {
       property->SetSlot(spec->AddStoreICSlot());
     }
+    property->SetStoreDataPropertySlot(
+        spec->AddStoreDataPropertyInLiteralICSlot());
   }
 }
@@ -392,8 +407,7 @@
 bool ObjectLiteral::Property::emit_store() const { return emit_store_; }
-void ObjectLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
-                                              FeedbackVectorSpec* spec,
+void ObjectLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                               FeedbackVectorSlotCache* cache) {
   // This logic that computes the number of slots needed for vector store
   // ics must mirror FullCodeGenerator::VisitObjectLiteral.
@@ -406,6 +420,7 @@
     Literal* key = property->key()->AsLiteral();
     Expression* value = property->value();
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -413,7 +428,7 @@
       case ObjectLiteral::Property::COMPUTED:
         // It is safe to use [[Put]] here because the boilerplate already
         // contains computed properties with an uninitialized value.
-        if (key->value()->IsInternalizedString()) {
+        if (key->IsStringLiteral()) {
          if (property->emit_store()) {
            property->SetSlot(spec->AddStoreICSlot());
            if (FunctionLiteral::NeedsHomeObject(value)) {
@@ -450,6 +465,8 @@
         property->SetSlot(spec->AddStoreICSlot());
       }
     }
+    property->SetStoreDataPropertySlot(
+        spec->AddStoreDataPropertyInLiteralICSlot());
   }
 }
@@ -491,13 +508,8 @@ bool ObjectLiteral::IsBoilerplateProperty(ObjectLiteral::Property* property) {
          property->kind() != ObjectLiteral::Property::PROTOTYPE;
 }
-void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
-  if (!constant_properties_.is_null()) return;
-
-  // Allocate a fixed array to hold all the constant properties.
-  Handle<FixedArray> constant_properties = isolate->factory()->NewFixedArray(
-      boilerplate_properties_ * 2, TENURED);
+void ObjectLiteral::InitDepthAndFlags() {
+  if (depth_ > 0) return;
   int position = 0;
   // Accumulate the value in local variables and store it at the end.
@@ -521,50 +533,43 @@
     MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
     if (m_literal != NULL) {
-      m_literal->BuildConstants(isolate);
+      m_literal->InitDepthAndFlags();
       if (m_literal->depth() >= depth_acc) depth_acc = m_literal->depth() + 1;
     }
-    // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
-    // value for COMPUTED properties, the real value is filled in at
-    // runtime. The enumeration order is maintained.
-    Handle<Object> key = property->key()->AsLiteral()->value();
-    Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
+    const AstValue* key = property->key()->AsLiteral()->raw_value();
+    Expression* value = property->value();
+
+    bool is_compile_time_value = CompileTimeValue::IsCompileTimeValue(value);
     // Ensure objects that may, at any point in time, contain fields with double
     // representation are always treated as nested objects. This is true for
-    // computed fields (value is undefined), and smi and double literals
-    // (value->IsNumber()).
+    // computed fields, and smi and double literals.
     // TODO(verwaest): Remove once we can store them inline.
     if (FLAG_track_double_fields &&
-        (value->IsNumber() || value->IsUninitialized(isolate))) {
+        (value->IsNumberLiteral() || !is_compile_time_value)) {
      bit_field_ = MayStoreDoublesField::update(bit_field_, true);
     }
-    is_simple = is_simple && !value->IsUninitialized(isolate);
+    is_simple = is_simple && is_compile_time_value;
     // Keep track of the number of elements in the object literal and
     // the largest element index. If the largest element index is
     // much larger than the number of elements, creating an object
     // literal with fast elements will be a waste of space.
     uint32_t element_index = 0;
-    if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
+    if (key->IsString() && key->AsString()->AsArrayIndex(&element_index)) {
       max_element_index = Max(element_index, max_element_index);
       elements++;
-      key = isolate->factory()->NewNumberFromUint(element_index);
-    } else if (key->ToArrayIndex(&element_index)) {
+    } else if (key->ToUint32(&element_index) && element_index != kMaxUInt32) {
       max_element_index = Max(element_index, max_element_index);
       elements++;
-    } else if (key->IsNumber()) {
-      key = isolate->factory()->NumberToString(key);
     }
-    // Add name, value pair to the fixed array.
-    constant_properties->set(position++, *key);
-    constant_properties->set(position++, *value);
+    // Increment the position for the key and the value.
+    position += 2;
   }
-  constant_properties_ = constant_properties;
   bit_field_ = FastElementsField::update(
       bit_field_,
       (max_element_index <= 32) || ((2 * elements) >= max_element_index));
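This rewrite splits the old BuildConstantProperties in two: InitDepthAndFlags computes depth, simplicity, and the fast-elements decision without allocating anything, so it no longer needs an Isolate; the heap-allocating half is reintroduced separately just below. A schematic of the two-phase idea on a toy literal tree (all names invented for the sketch):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    // Phase 1 (heap-free): walk the literal computing depth and "is simple".
    // Phase 2 (heap-touching): build the constant backing store, reusing the
    // phase-1 metadata instead of recomputing it during allocation.
    struct ToyLiteral {
      std::vector<ToyLiteral> children;
      bool compile_time_value = true;
      int depth = 0;  // 0 means "not yet initialized"
      bool is_simple = true;
      std::vector<int> constants;  // stand-in for the boilerplate FixedArray

      void InitDepthAndFlags() {
        if (depth > 0) return;  // already initialized
        int depth_acc = 1;
        for (ToyLiteral& child : children) {
          child.InitDepthAndFlags();
          depth_acc = std::max(depth_acc, child.depth + 1);
          is_simple = is_simple && child.compile_time_value;
        }
        depth = depth_acc;
      }

      void BuildConstants() {
        assert(depth > 0);  // phase 1 must already have run
        if (!constants.empty()) return;
        constants.assign(children.size(), 0);  // the only allocating step
      }
    };

    int main() {
      ToyLiteral outer;
      outer.children.resize(2);
      outer.InitDepthAndFlags();  // safe with no heap or isolate available
      assert(outer.depth == 2 && outer.is_simple);
      outer.BuildConstants();  // later, once allocation is allowed
      assert(outer.constants.size() == 2);
    }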
@@ -574,6 +579,91 @@
   set_depth(depth_acc);
 }
+void ObjectLiteral::BuildConstantProperties(Isolate* isolate) {
+  if (!constant_properties_.is_null()) return;
+
+  // Allocate a fixed array to hold all the constant properties.
+  Handle<FixedArray> constant_properties =
+      isolate->factory()->NewFixedArray(boilerplate_properties_ * 2, TENURED);
+
+  int position = 0;
+  for (int i = 0; i < properties()->length(); i++) {
+    ObjectLiteral::Property* property = properties()->at(i);
+    if (!IsBoilerplateProperty(property)) {
+      continue;
+    }
+
+    if (static_cast<uint32_t>(position) == boilerplate_properties_ * 2) {
+      DCHECK(property->is_computed_name());
+      break;
+    }
+    DCHECK(!property->is_computed_name());
+
+    MaterializedLiteral* m_literal = property->value()->AsMaterializedLiteral();
+    if (m_literal != NULL) {
+      m_literal->BuildConstants(isolate);
+    }
+
+    // Add CONSTANT and COMPUTED properties to boilerplate. Use undefined
+    // value for COMPUTED properties, the real value is filled in at
+    // runtime. The enumeration order is maintained.
+    Handle<Object> key = property->key()->AsLiteral()->value();
+    Handle<Object> value = GetBoilerplateValue(property->value(), isolate);
+
+    uint32_t element_index = 0;
+    if (key->IsString() && String::cast(*key)->AsArrayIndex(&element_index)) {
+      key = isolate->factory()->NewNumberFromUint(element_index);
+    } else if (key->IsNumber() && !key->ToArrayIndex(&element_index)) {
+      key = isolate->factory()->NumberToString(key);
+    }
+
+    // Add name, value pair to the fixed array.
+    constant_properties->set(position++, *key);
+    constant_properties->set(position++, *value);
+  }
+
+  constant_properties_ = constant_properties;
+}
+
+bool ObjectLiteral::IsFastCloningSupported() const {
+  // The FastCloneShallowObject builtin doesn't copy elements, and object
+  // literals don't support copy-on-write (COW) elements for now.
+  // TODO(mvstanton): make object literals support COW elements.
+  return fast_elements() && has_shallow_properties() &&
+         properties_count() <= ConstructorBuiltinsAssembler::
+                                   kMaximumClonedShallowObjectProperties;
+}
+void ArrayLiteral::InitDepthAndFlags() {
+  DCHECK_LT(first_spread_index_, 0);
+
+  if (depth_ > 0) return;
+
+  int constants_length = values()->length();
+
+  // Fill in the literals.
+  bool is_simple = true;
+  int depth_acc = 1;
+  int array_index = 0;
+  for (; array_index < constants_length; array_index++) {
+    Expression* element = values()->at(array_index);
+    DCHECK(!element->IsSpread());
+    MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
+    if (m_literal != NULL) {
+      m_literal->InitDepthAndFlags();
+      if (m_literal->depth() + 1 > depth_acc) {
+        depth_acc = m_literal->depth() + 1;
+      }
+    }
+
+    if (!CompileTimeValue::IsCompileTimeValue(element)) {
+      is_simple = false;
+    }
+  }
+
+  set_is_simple(is_simple);
+  set_depth(depth_acc);
+}
 void ArrayLiteral::BuildConstantElements(Isolate* isolate) {
   DCHECK_LT(first_spread_index_, 0);
@@ -586,8 +676,6 @@
       isolate->factory()->NewFixedArrayWithHoles(constants_length);
   // Fill in the literals.
-  bool is_simple = true;
-  int depth_acc = 1;
   bool is_holey = false;
   int array_index = 0;
   for (; array_index < constants_length; array_index++) {
@@ -596,9 +684,6 @@
     MaterializedLiteral* m_literal = element->AsMaterializedLiteral();
     if (m_literal != NULL) {
       m_literal->BuildConstants(isolate);
-      if (m_literal->depth() + 1 > depth_acc) {
-        depth_acc = m_literal->depth() + 1;
-      }
     }
     // New handle scope here, needs to be after BuildContants().
@@ -611,7 +696,6 @@
     if (boilerplate_value->IsUninitialized(isolate)) {
       boilerplate_value = handle(Smi::kZero, isolate);
-      is_simple = false;
     }
     kind = GetMoreGeneralElementsKind(kind,
@@ -623,7 +707,7 @@
   // Simple and shallow arrays can be lazily copied, we transform the
   // elements array to a copy-on-write array.
-  if (is_simple && depth_acc == 1 && array_index > 0 &&
+  if (is_simple() && depth() == 1 && array_index > 0 &&
       IsFastSmiOrObjectElementsKind(kind)) {
     fixed_array->set_map(isolate->heap()->fixed_cow_array_map());
   }
@@ -637,20 +721,20 @@
     accessor->CopyElements(fixed_array, from_kind, elements, constants_length);
   }
-  // Remember both the literal's constant values as well as the ElementsKind
-  // in a 2-element FixedArray.
-  Handle<FixedArray> literals = isolate->factory()->NewFixedArray(2, TENURED);
-  literals->set(0, Smi::FromInt(kind));
-  literals->set(1, *elements);
+  // Remember both the literal's constant values as well as the ElementsKind.
+  Handle<ConstantElementsPair> literals =
+      isolate->factory()->NewConstantElementsPair(kind, elements);
   constant_elements_ = literals;
-  set_is_simple(is_simple);
-  set_depth(depth_acc);
 }
+
+bool ArrayLiteral::IsFastCloningSupported() const {
+  return depth() <= 1 &&
+         values()->length() <=
+             ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements;
+}
-void ArrayLiteral::AssignFeedbackVectorSlots(Isolate* isolate,
-                                             FeedbackVectorSpec* spec,
+void ArrayLiteral::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                              FeedbackVectorSlotCache* cache) {
   // This logic that computes the number of slots needed for vector store
   // ics must mirror FullCodeGenerator::VisitArrayLiteral.
@@ -678,6 +762,16 @@ Handle<Object> MaterializedLiteral::GetBoilerplateValue(Expression* expression,
   return isolate->factory()->uninitialized_value();
 }
+void MaterializedLiteral::InitDepthAndFlags() {
+  if (IsArrayLiteral()) {
+    return AsArrayLiteral()->InitDepthAndFlags();
+  }
+  if (IsObjectLiteral()) {
+    return AsObjectLiteral()->InitDepthAndFlags();
+  }
+  DCHECK(IsRegExpLiteral());
+  DCHECK_LE(1, depth());  // Depth should be initialized.
+}
+
 void MaterializedLiteral::BuildConstants(Isolate* isolate) {
   if (IsArrayLiteral()) {
@@ -687,7 +781,6 @@
     return AsObjectLiteral()->BuildConstantProperties(isolate);
   }
   DCHECK(IsRegExpLiteral());
-  DCHECK(depth() >= 1);  // Depth should be initialized.
 }
@@ -711,8 +804,7 @@ void BinaryOperation::RecordToBooleanTypeFeedback(TypeFeedbackOracle* oracle) {
 }
 void BinaryOperation::AssignFeedbackVectorSlots(
-    Isolate* isolate, FeedbackVectorSpec* spec,
-    FeedbackVectorSlotCache* cache) {
+    FeedbackVectorSpec* spec, FeedbackVectorSlotCache* cache) {
   // Feedback vector slot is only used by interpreter for binary operations.
   // Full-codegen uses AstId to record type feedback.
   switch (op()) {
@@ -733,8 +825,7 @@ static bool IsTypeof(Expression* expr) {
 }
 void CompareOperation::AssignFeedbackVectorSlots(
-    Isolate* isolate, FeedbackVectorSpec* spec,
-    FeedbackVectorSlotCache* cache_) {
+    FeedbackVectorSpec* spec, FeedbackVectorSlotCache* cache_) {
   // Feedback vector slot is only used by interpreter for binary operations.
   // Full-codegen uses AstId to record type feedback.
   switch (op()) {
@@ -892,7 +983,7 @@ bool Expression::IsMonomorphic() const {
   }
 }
-void Call::AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+void Call::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                      FeedbackVectorSlotCache* cache) {
   ic_slot_ = spec->AddCallICSlot();
 }
@@ -931,8 +1022,7 @@ CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements,
       statements_(statements),
       compare_type_(AstType::None()) {}
-void CaseClause::AssignFeedbackVectorSlots(Isolate* isolate,
-                                           FeedbackVectorSpec* spec,
+void CaseClause::AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                            FeedbackVectorSlotCache* cache) {
   type_feedback_slot_ = spec->AddInterpreterCompareICSlot();
 }

353  deps/v8/src/ast/ast.h vendored

@@ -5,6 +5,7 @@
 #ifndef V8_AST_AST_H_
 #define V8_AST_AST_H_
+#include "src/assembler.h"
 #include "src/ast/ast-types.h"
 #include "src/ast/ast-value-factory.h"
 #include "src/ast/modules.h"
@@ -102,6 +103,7 @@
   V(SuperCallReference)   \
   V(CaseClause)           \
   V(EmptyParentheses)     \
+  V(GetIterator)          \
   V(DoExpression)         \
   V(RewritableExpression)
@@ -154,7 +156,7 @@ class AstProperties final BASE_EMBEDDED {
   enum Flag {
     kNoFlags = 0,
     kDontSelfOptimize = 1 << 0,
-    kDontCrankshaft = 1 << 1
+    kMustUseIgnitionTurbo = 1 << 1
   };
   typedef base::Flags<Flag> Flags;
@@ -190,6 +192,7 @@ class AstNode: public ZoneObject {
   int position() const { return position_; }
 #ifdef DEBUG
+  void Print();
   void Print(Isolate* isolate);
 #endif  // DEBUG
@@ -317,6 +320,9 @@ class Expression : public AstNode {
   // True iff the expression is a literal represented as a smi.
   bool IsSmiLiteral() const;
+  // True iff the expression is a literal represented as a number.
+  bool IsNumberLiteral() const;
+
   // True iff the expression is a string literal.
   bool IsStringLiteral() const;
@@ -466,9 +472,6 @@ class Block final : public BreakableStatement {
   class IgnoreCompletionField
       : public BitField<bool, BreakableStatement::kNextBitFieldIndex, 1> {};
-
- protected:
-  static const uint8_t kNextBitFieldIndex = IgnoreCompletionField::kNext;
 };
@@ -484,9 +487,6 @@ class DoExpression final : public Expression {
   }
   bool IsAnonymousFunctionDefinition() const;
-
- protected:
-  static const uint8_t kNextBitFieldIndex = Expression::kNextBitFieldIndex;
  private:
   friend class AstNodeFactory;
@@ -518,8 +518,6 @@ class Declaration : public AstNode {
   Declaration(VariableProxy* proxy, Scope* scope, int pos, NodeType type)
       : AstNode(pos, type), proxy_(proxy), scope_(scope), next_(nullptr) {}
-
-  static const uint8_t kNextBitFieldIndex = AstNode::kNextBitFieldIndex;
  private:
   VariableProxy* proxy_;
   // Nested scope from which the declaration originated.
@@ -734,7 +732,7 @@ class ForInStatement final : public ForEachStatement {
   void set_subject(Expression* e) { subject_ = e; }
   // Type feedback information.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
   FeedbackVectorSlot EachFeedbackSlot() const { return each_slot_; }
   FeedbackVectorSlot ForInFeedbackSlot() {
@@ -778,9 +776,6 @@ class ForInStatement final : public ForEachStatement {
   class ForInTypeField
       : public BitField<ForInType, ForEachStatement::kNextBitFieldIndex, 1> {};
-
- protected:
-  static const uint8_t kNextBitFieldIndex = ForInTypeField::kNext;
 };
@@ -826,12 +821,6 @@ class ForOfStatement final : public ForEachStatement {
   void set_result_done(Expression* e) { result_done_ = e; }
   void set_assign_each(Expression* e) { assign_each_ = e; }
-  BailoutId ContinueId() const { return EntryId(); }
-  BailoutId StackCheckId() const { return BackEdgeId(); }
-
-  static int num_ids() { return parent_num_ids() + 1; }
-  BailoutId BackEdgeId() const { return BailoutId(local_id(0)); }
-
  private:
   friend class AstNodeFactory;
@@ -842,8 +831,6 @@ class ForOfStatement final : public ForEachStatement {
         next_result_(NULL),
         result_done_(NULL),
         assign_each_(NULL) {}
-  static int parent_num_ids() { return ForEachStatement::num_ids(); }
-  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
   Variable* iterator_;
   Expression* assign_iterator_;
@@ -930,30 +917,16 @@ class WithStatement final : public Statement {
   Statement* statement() const { return statement_; }
   void set_statement(Statement* s) { statement_ = s; }
-  void set_base_id(int id) { base_id_ = id; }
-  static int num_ids() { return parent_num_ids() + 2; }
-  BailoutId ToObjectId() const { return BailoutId(local_id(0)); }
-  BailoutId EntryId() const { return BailoutId(local_id(1)); }
-
  private:
   friend class AstNodeFactory;
   WithStatement(Scope* scope, Expression* expression, Statement* statement,
                 int pos)
       : Statement(pos, kWithStatement),
-        base_id_(BailoutId::None().ToInt()),
         scope_(scope),
         expression_(expression),
         statement_(statement) {}
-  static int parent_num_ids() { return 0; }
-  int base_id() const {
-    DCHECK(!BailoutId(base_id_).IsNone());
-    return base_id_;
-  }
-  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
-
-  int base_id_;
   Scope* scope_;
   Expression* expression_;
   Statement* statement_;
@@ -981,7 +954,7 @@ class CaseClause final : public Expression {
   // CaseClause will have both a slot in the feedback vector and the
   // TypeFeedbackId to record the type information. TypeFeedbackId is used by
   // full codegen and the feedback vector slot is used by interpreter.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
   FeedbackVectorSlot CompareOperationFeedbackSlot() {
@@ -1212,22 +1185,15 @@ class SloppyBlockFunctionStatement final : public Statement {
  public:
   Statement* statement() const { return statement_; }
   void set_statement(Statement* statement) { statement_ = statement; }
-  Scope* scope() const { return scope_; }
-  SloppyBlockFunctionStatement* next() { return next_; }
-  void set_next(SloppyBlockFunctionStatement* next) { next_ = next; }
  private:
   friend class AstNodeFactory;
-  SloppyBlockFunctionStatement(Statement* statement, Scope* scope)
+  explicit SloppyBlockFunctionStatement(Statement* statement)
       : Statement(kNoSourcePosition, kSloppyBlockFunctionStatement),
-        statement_(statement),
-        scope_(scope),
-        next_(nullptr) {}
+        statement_(statement) {}
   Statement* statement_;
-  Scope* const scope_;
-  SloppyBlockFunctionStatement* next_;
 };
@@ -1317,6 +1283,9 @@ class MaterializedLiteral : public Expression {
     depth_ = depth;
   }
+  // Populate the depth field and any flags the literal has.
+  void InitDepthAndFlags();
+
   // Populate the constant properties/elements fixed array.
   void BuildConstants(Isolate* isolate);
   friend class ArrayLiteral;
@@ -1347,11 +1316,15 @@ class LiteralProperty : public ZoneObject {
     return slots_[offset];
   }
+  FeedbackVectorSlot GetStoreDataPropertySlot() const;
+
   void SetSlot(FeedbackVectorSlot slot, int offset = 0) {
     DCHECK_LT(offset, static_cast<int>(arraysize(slots_)));
     slots_[offset] = slot;
   }
+  void SetStoreDataPropertySlot(FeedbackVectorSlot slot);
+
   bool NeedsSetFunctionName() const;
  protected:
@@ -1374,8 +1347,9 @@ class ObjectLiteralProperty final : public LiteralProperty {
     COMPUTED,              // Property with computed value (execution time).
     MATERIALIZED_LITERAL,  // Property value is a materialized literal.
     GETTER,
     SETTER,     // Property is an accessor function.
-    PROTOTYPE   // Property is __proto__.
+    PROTOTYPE,  // Property is __proto__.
+    SPREAD
   };
   Kind kind() const { return kind_; }
@@ -1412,6 +1386,7 @@ class ObjectLiteral final : public MaterializedLiteral {
   typedef ObjectLiteralProperty Property;
   Handle<FixedArray> constant_properties() const {
+    DCHECK(!constant_properties_.is_null());
     return constant_properties_;
   }
   int properties_count() const { return boilerplate_properties_; }
@@ -1428,6 +1403,17 @@ class ObjectLiteral final : public MaterializedLiteral {
   // Decide if a property should be in the object boilerplate.
   static bool IsBoilerplateProperty(Property* property);
+  // Populate the depth field and flags.
+  void InitDepthAndFlags();
+
+  // Get the constant properties fixed array, populating it if necessary.
+  Handle<FixedArray> GetOrBuildConstantProperties(Isolate* isolate) {
+    if (constant_properties_.is_null()) {
+      BuildConstantProperties(isolate);
+    }
+    return constant_properties();
+  }
+
   // Populate the constant properties fixed array.
   void BuildConstantProperties(Isolate* isolate);
@@ -1436,6 +1422,9 @@ class ObjectLiteral final : public MaterializedLiteral {
   // marked expressions, no store code is emitted.
   void CalculateEmitStore(Zone* zone);
+  // Determines whether the {FastCloneShallowObject} builtin can be used.
+  bool IsFastCloningSupported() const;
+
   // Assemble bitfield of flags for the CreateObjectLiteral helper.
   int ComputeFlags(bool disable_mementos = false) const {
     int flags = fast_elements() ? kFastElements : kNoFlags;
@@ -1465,22 +1454,15 @@
   BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
   // Return an AST id for a property that is used in simulate instructions.
-  BailoutId GetIdForPropertyName(int i) {
-    return BailoutId(local_id(2 * i + 1));
-  }
-  BailoutId GetIdForPropertySet(int i) {
-    return BailoutId(local_id(2 * i + 2));
-  }
+  BailoutId GetIdForPropertySet(int i) { return BailoutId(local_id(i + 1)); }
   // Unlike other AST nodes, this number of bailout IDs allocated for an
   // ObjectLiteral can vary, so num_ids() is not a static method.
-  int num_ids() const {
-    return parent_num_ids() + 1 + 2 * properties()->length();
-  }
+  int num_ids() const { return parent_num_ids() + 1 + properties()->length(); }
   // Object literals need one feedback slot for each non-trivial value, as well
   // as some slots for home objects.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
  private:
@@ -1500,7 +1482,6 @@
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
   uint32_t boilerplate_properties_;
-  FeedbackVectorSlot slot_;
   Handle<FixedArray> constant_properties_;
   ZoneList<Property*>* properties_;
@@ -1510,9 +1491,6 @@
   };
   class MayStoreDoublesField
       : public BitField<bool, HasElementsField::kNext, 1> {};
-
- protected:
-  static const uint8_t kNextBitFieldIndex = MayStoreDoublesField::kNext;
 };
@@ -1565,11 +1543,11 @@
 // for minimizing the work when constructing it at runtime.
 class ArrayLiteral final : public MaterializedLiteral {
  public:
-  Handle<FixedArray> constant_elements() const { return constant_elements_; }
+  Handle<ConstantElementsPair> constant_elements() const {
+    return constant_elements_;
+  }
   ElementsKind constant_elements_kind() const {
-    DCHECK_EQ(2, constant_elements_->length());
-    return static_cast<ElementsKind>(
-        Smi::cast(constant_elements_->get(0))->value());
+    return static_cast<ElementsKind>(constant_elements()->elements_kind());
   }
   ZoneList<Expression*>* values() const { return values_; }
@@ -1583,9 +1561,23 @@
   // ArrayLiteral can vary, so num_ids() is not a static method.
   int num_ids() const { return parent_num_ids() + 1 + values()->length(); }
+  // Populate the depth field and flags.
+  void InitDepthAndFlags();
+
+  // Get the constant elements fixed array, populating it if necessary.
+  Handle<ConstantElementsPair> GetOrBuildConstantElements(Isolate* isolate) {
+    if (constant_elements_.is_null()) {
+      BuildConstantElements(isolate);
+    }
+    return constant_elements();
+  }
+
   // Populate the constant elements fixed array.
   void BuildConstantElements(Isolate* isolate);
+  // Determines whether the {FastCloneShallowArray} builtin can be used.
+  bool IsFastCloningSupported() const;
+
   // Assemble bitfield of flags for the CreateArrayLiteral helper.
   int ComputeFlags(bool disable_mementos = false) const {
     int flags = depth() == 1 ? kShallowElements : kNoFlags;
@@ -1614,7 +1606,7 @@
     kDisableMementos = 1 << 1
   };
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
   FeedbackVectorSlot LiteralFeedbackSlot() const { return literal_slot_; }
@@ -1632,7 +1624,7 @@
   int first_spread_index_;
   FeedbackVectorSlot literal_slot_;
-  Handle<FixedArray> constant_elements_;
+  Handle<ConstantElementsPair> constant_elements_;
   ZoneList<Expression*>* values_;
 };
@@ -1663,6 +1655,9 @@ class VariableProxy final : public Expression {
   bool is_assigned() const { return IsAssignedField::decode(bit_field_); }
   void set_is_assigned() {
     bit_field_ = IsAssignedField::update(bit_field_, true);
+    if (is_resolved()) {
+      var()->set_maybe_assigned();
+    }
   }
   bool is_resolved() const { return IsResolvedField::decode(bit_field_); }
@@ -1690,7 +1685,7 @@
     return var()->IsUnallocated() || var()->IsLookupSlot();
   }
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
   FeedbackVectorSlot VariableFeedbackSlot() { return variable_feedback_slot_; }
@@ -1786,7 +1781,7 @@ class Property final : public Expression {
   bool IsSuperAccess() { return obj()->IsSuperPropertyReference(); }
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache) {
     FeedbackVectorSlotKind kind = key()->IsPropertyName()
                                       ? FeedbackVectorSlotKind::LOAD_IC
@@ -1844,7 +1839,7 @@ class Call final : public Expression {
   void set_expression(Expression* e) { expression_ = e; }
   // Type feedback information.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
   FeedbackVectorSlot CallFeedbackICSlot() const { return ic_slot_; }
@@ -1876,11 +1871,9 @@
     allocation_site_ = site;
   }
-  static int num_ids() { return parent_num_ids() + 4; }
+  static int num_ids() { return parent_num_ids() + 2; }
   BailoutId ReturnId() const { return BailoutId(local_id(0)); }
-  BailoutId EvalId() const { return BailoutId(local_id(1)); }
-  BailoutId LookupId() const { return BailoutId(local_id(2)); }
-  BailoutId CallId() const { return BailoutId(local_id(3)); }
+  BailoutId CallId() const { return BailoutId(local_id(1)); }
   bool is_uninitialized() const {
     return IsUninitializedField::decode(bit_field_);
@@ -1964,7 +1957,7 @@ class CallNew final : public Expression {
   void set_expression(Expression* e) { expression_ = e; }
   // Type feedback information.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache) {
     // CallNew stores feedback in the exact same way as Call. We can
     // piggyback on the type feedback infrastructure for calls.
@@ -2138,7 +2131,7 @@ class BinaryOperation final : public Expression {
   // BinaryOperation will have both a slot in the feedback vector and the
   // TypeFeedbackId to record the type information. TypeFeedbackId is used
   // by full codegen and the feedback vector slot is used by interpreter.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
   FeedbackVectorSlot BinaryOperationFeedbackSlot() const {
@@ -2231,7 +2224,7 @@ class CountOperation final : public Expression {
     return binary_operation_slot_;
   }
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
   FeedbackVectorSlot CountSlot() const { return slot_; }
@@ -2283,7 +2276,7 @@ class CompareOperation final : public Expression {
   // CompareOperation will have both a slot in the feedback vector and the
   // TypeFeedbackId to record the type information. TypeFeedbackId is used
   // by full codegen and the feedback vector slot is used by interpreter.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
   FeedbackVectorSlot CompareOperationFeedbackSlot() const {
@@ -2429,7 +2422,7 @@ class Assignment final : public Expression {
     bit_field_ = StoreModeField::update(bit_field_, mode);
   }
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
                                  FeedbackVectorSlotCache* cache);
   FeedbackVectorSlot AssignmentSlot() const { return slot_; }
@@ -2571,6 +2564,8 @@ class FunctionLiteral final : public Expression {
     kAccessorOrMethod
   };
+  enum IdType { kIdTypeInvalid = -1, kIdTypeTopLevel = 0 };
+
   enum ParameterFlag { kNoDuplicateParameters, kHasDuplicateParameters };
   enum EagerCompileHint { kShouldEagerCompile, kShouldLazyCompile };
@@ -2594,6 +2589,18 @@
   }
   LanguageMode language_mode() const;
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
+                                 FeedbackVectorSlotCache* cache) {
+    // The + 1 is because we need an array with room for the literals
+    // as well as the feedback vector.
+    literal_feedback_slot_ =
+        spec->AddCreateClosureSlot(materialized_literal_count_ + 1);
+  }
+
+  FeedbackVectorSlot LiteralFeedbackSlot() const {
+    return literal_feedback_slot_;
+  }
+
   static bool NeedsHomeObject(Expression* expr);
int materialized_literal_count() { return materialized_literal_count_; } int materialized_literal_count() { return materialized_literal_count_; }
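The new FunctionLiteral::AssignFeedbackVectorSlots above reserves one CreateClosure slot whose recorded length is materialized_literal_count_ + 1: room for every materialized literal plus the feedback vector itself. A minimal sketch of that bookkeeping in plain C++; SpecSketch and its members are illustrative stand-ins, not V8's real FeedbackVectorSpec API:

    #include <cstddef>
    #include <vector>

    // Illustrative stand-in for a slot spec: each CreateClosure slot records
    // how long the closure's literals array must be. The "+ 1" in the hunk
    // above reserves room for the feedback vector alongside the literals.
    class SpecSketch {
     public:
      size_t AddCreateClosureSlot(size_t literals_plus_vector_length) {
        lengths_.push_back(literals_plus_vector_length);
        return lengths_.size() - 1;  // slot index handed back to the AST node
      }

     private:
      std::vector<size_t> lengths_;
    };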
@@ -2644,8 +2651,6 @@ class FunctionLiteral final : public Expression {
     return HasDuplicateParameters::decode(bit_field_);
   }

-  bool is_function() const { return IsFunction::decode(bit_field_); }
-
   // This is used as a heuristic on when to eagerly compile a function
   // literal. We consider the following constructs as hints that the
   // function will be called immediately:
@@ -2691,25 +2696,15 @@ class FunctionLiteral final : public Expression {
   int yield_count() { return yield_count_; }
   void set_yield_count(int yield_count) { yield_count_ = yield_count; }

-  bool requires_class_field_init() {
-    return RequiresClassFieldInit::decode(bit_field_);
-  }
-  void set_requires_class_field_init(bool requires_class_field_init) {
-    bit_field_ =
-        RequiresClassFieldInit::update(bit_field_, requires_class_field_init);
-  }
-  bool is_class_field_initializer() {
-    return IsClassFieldInitializer::decode(bit_field_);
-  }
-  void set_is_class_field_initializer(bool is_class_field_initializer) {
-    bit_field_ =
-        IsClassFieldInitializer::update(bit_field_, is_class_field_initializer);
-  }
-
   int return_position() {
     return std::max(start_position(), end_position() - (has_braces_ ? 1 : 0));
   }

+  int function_literal_id() const { return function_literal_id_; }
+  void set_function_literal_id(int function_literal_id) {
+    function_literal_id_ = function_literal_id;
+  }
+
  private:
   friend class AstNodeFactory;
@@ -2720,7 +2715,7 @@ class FunctionLiteral final : public Expression {
                   int function_length, FunctionType function_type,
                   ParameterFlag has_duplicate_parameters,
                   EagerCompileHint eager_compile_hint, int position,
-                  bool is_function, bool has_braces)
+                  bool has_braces, int function_literal_id)
       : Expression(position, kFunctionLiteral),
         materialized_literal_count_(materialized_literal_count),
         expected_property_count_(expected_property_count),
@@ -2733,16 +2728,14 @@ class FunctionLiteral final : public Expression {
         scope_(scope),
         body_(body),
         raw_inferred_name_(ast_value_factory->empty_string()),
-        ast_properties_(zone) {
-    bit_field_ |=
-        FunctionTypeBits::encode(function_type) | Pretenure::encode(false) |
-        HasDuplicateParameters::encode(has_duplicate_parameters ==
-                                       kHasDuplicateParameters) |
-        IsFunction::encode(is_function) |
-        RequiresClassFieldInit::encode(false) |
-        ShouldNotBeUsedOnceHintField::encode(false) |
-        DontOptimizeReasonField::encode(kNoReason) |
-        IsClassFieldInitializer::encode(false);
+        ast_properties_(zone),
+        function_literal_id_(function_literal_id) {
+    bit_field_ |= FunctionTypeBits::encode(function_type) |
+                  Pretenure::encode(false) |
+                  HasDuplicateParameters::encode(has_duplicate_parameters ==
+                                                 kHasDuplicateParameters) |
+                  ShouldNotBeUsedOnceHintField::encode(false) |
+                  DontOptimizeReasonField::encode(kNoReason);
     if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile();
   }
@@ -2750,15 +2743,11 @@ class FunctionLiteral final : public Expression {
       : public BitField<FunctionType, Expression::kNextBitFieldIndex, 2> {};
   class Pretenure : public BitField<bool, FunctionTypeBits::kNext, 1> {};
   class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
-  class IsFunction : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
   class ShouldNotBeUsedOnceHintField
-      : public BitField<bool, IsFunction::kNext, 1> {};
-  class RequiresClassFieldInit
-      : public BitField<bool, ShouldNotBeUsedOnceHintField::kNext, 1> {};
-  class IsClassFieldInitializer
-      : public BitField<bool, RequiresClassFieldInit::kNext, 1> {};
+      : public BitField<bool, HasDuplicateParameters::kNext, 1> {};
   class DontOptimizeReasonField
-      : public BitField<BailoutReason, IsClassFieldInitializer::kNext, 8> {};
+      : public BitField<BailoutReason, ShouldNotBeUsedOnceHintField::kNext, 8> {
+  };

   int materialized_literal_count_;
   int expected_property_count_;
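The BitField chain above is the usual V8 idiom for packing several small fields into one bit_field_ word: each field starts at the previous field's kNext, so deleting IsFunction, RequiresClassFieldInit and IsClassFieldInitializer only requires re-chaining the bases. A simplified, self-contained sketch of the pattern (not V8's exact template, which lives elsewhere in the tree):

    #include <cstdint>

    // Simplified sketch of the BitField pattern used above: each field knows
    // its shift and width, and kNext lets the next field start where this
    // one ends.
    template <class T, int shift, int size, class U = uint32_t>
    class BitFieldSketch {
     public:
      static const U kMask = ((U{1} << size) - 1) << shift;
      static const int kNext = shift + size;

      static U encode(T value) { return static_cast<U>(value) << shift; }
      static T decode(U value) {
        return static_cast<T>((value & kMask) >> shift);
      }
      static U update(U previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
    };

    // Chained exactly like the diff above: once IsFunction is deleted,
    // the next field simply starts at HasDuplicateParameters::kNext.
    using HasDuplicateParametersSketch = BitFieldSketch<bool, 0, 1>;
    using ShouldNotBeUsedOnceHintSketch =
        BitFieldSketch<bool, HasDuplicateParametersSketch::kNext, 1>;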
@@ -2774,6 +2763,8 @@ class FunctionLiteral final : public Expression {
   const AstString* raw_inferred_name_;
   Handle<String> inferred_name_;
   AstProperties ast_properties_;
+  int function_literal_id_;
+  FeedbackVectorSlot literal_feedback_slot_;
 };

 // Property is used for passing information
@@ -2808,27 +2799,16 @@ class ClassLiteral final : public Expression {
   ZoneList<Property*>* properties() const { return properties_; }
   int start_position() const { return position(); }
   int end_position() const { return end_position_; }
-
-  VariableProxy* static_initializer_proxy() const {
-    return static_initializer_proxy_;
-  }
-  void set_static_initializer_proxy(VariableProxy* proxy) {
-    static_initializer_proxy_ = proxy;
-  }
-
-  BailoutId CreateLiteralId() const { return BailoutId(local_id(0)); }
-  BailoutId PrototypeId() { return BailoutId(local_id(1)); }
-
-  // Return an AST id for a property that is used in simulate instructions.
-  BailoutId GetIdForProperty(int i) { return BailoutId(local_id(i + 2)); }
-
-  // Unlike other AST nodes, this number of bailout IDs allocated for an
-  // ClassLiteral can vary, so num_ids() is not a static method.
-  int num_ids() const { return parent_num_ids() + 2 + properties()->length(); }
+  bool has_name_static_property() const {
+    return HasNameStaticProperty::decode(bit_field_);
+  }
+  bool has_static_computed_names() const {
+    return HasStaticComputedNames::decode(bit_field_);
+  }

   // Object literals need one feedback slot for each non-trivial value, as well
   // as some slots for home objects.
-  void AssignFeedbackVectorSlots(Isolate* isolate, FeedbackVectorSpec* spec,
-                                 FeedbackVectorSlotCache* cache);
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
+                                 FeedbackVectorSlotCache* cache);

   bool NeedsProxySlot() const {
@@ -2836,7 +2816,7 @@ class ClassLiteral final : public Expression {
            class_variable_proxy()->var()->IsUnallocated();
   }

-  FeedbackVectorSlot PrototypeSlot() const { return prototype_slot_; }
+  FeedbackVectorSlot HomeObjectSlot() const { return home_object_slot_; }
   FeedbackVectorSlot ProxySlot() const { return proxy_slot_; }

  private:
@@ -2844,26 +2824,30 @@ class ClassLiteral final : public Expression {
   ClassLiteral(VariableProxy* class_variable_proxy, Expression* extends,
                FunctionLiteral* constructor, ZoneList<Property*>* properties,
-               int start_position, int end_position)
+               int start_position, int end_position,
+               bool has_name_static_property, bool has_static_computed_names)
       : Expression(start_position, kClassLiteral),
         end_position_(end_position),
         class_variable_proxy_(class_variable_proxy),
         extends_(extends),
         constructor_(constructor),
-        properties_(properties),
-        static_initializer_proxy_(nullptr) {}
-
-  static int parent_num_ids() { return Expression::num_ids(); }
-  int local_id(int n) const { return base_id() + parent_num_ids() + n; }
+        properties_(properties) {
+    bit_field_ |= HasNameStaticProperty::encode(has_name_static_property) |
+                  HasStaticComputedNames::encode(has_static_computed_names);
+  }

   int end_position_;
-  FeedbackVectorSlot prototype_slot_;
+  FeedbackVectorSlot home_object_slot_;
   FeedbackVectorSlot proxy_slot_;
   VariableProxy* class_variable_proxy_;
   Expression* extends_;
   FunctionLiteral* constructor_;
   ZoneList<Property*>* properties_;
-  VariableProxy* static_initializer_proxy_;
+
+  class HasNameStaticProperty
+      : public BitField<bool, Expression::kNextBitFieldIndex, 1> {};
+  class HasStaticComputedNames
+      : public BitField<bool, HasNameStaticProperty::kNext, 1> {};
 };
@@ -2871,6 +2855,19 @@ class NativeFunctionLiteral final : public Expression {
  public:
   Handle<String> name() const { return name_->string(); }
   v8::Extension* extension() const { return extension_; }
+  FeedbackVectorSlot LiteralFeedbackSlot() const {
+    return literal_feedback_slot_;
+  }
+
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
+                                 FeedbackVectorSlotCache* cache) {
+    // 0 is a magic number here. It means we are holding the literals
+    // array for a native function literal, which needs to be
+    // the empty literals array.
+    // TODO(mvstanton): The FeedbackVectorSlotCache can be adapted
+    // to always return the same slot for this case.
+    literal_feedback_slot_ = spec->AddCreateClosureSlot(0);
+  }

  private:
   friend class AstNodeFactory;
@@ -2883,6 +2880,7 @@ class NativeFunctionLiteral final : public Expression {
   const AstRawString* name_;
   v8::Extension* extension_;
+  FeedbackVectorSlot literal_feedback_slot_;
 };
@@ -2955,7 +2953,43 @@ class EmptyParentheses final : public Expression {
   explicit EmptyParentheses(int pos) : Expression(pos, kEmptyParentheses) {}
 };

+// Represents the spec operation `GetIterator()`
+// (defined at https://tc39.github.io/ecma262/#sec-getiterator). Ignition
+// desugars this into a LoadIC / JSLoadNamed, CallIC, and a type-check to
+// validate return value of the Symbol.iterator() call.
+class GetIterator final : public Expression {
+ public:
+  Expression* iterable() const { return iterable_; }
+  void set_iterable(Expression* iterable) { iterable_ = iterable; }
+
+  static int num_ids() { return parent_num_ids(); }
+
+  void AssignFeedbackVectorSlots(FeedbackVectorSpec* spec,
+                                 FeedbackVectorSlotCache* cache) {
+    iterator_property_feedback_slot_ =
+        spec->AddSlot(FeedbackVectorSlotKind::LOAD_IC);
+    iterator_call_feedback_slot_ =
+        spec->AddSlot(FeedbackVectorSlotKind::CALL_IC);
+  }
+
+  FeedbackVectorSlot IteratorPropertyFeedbackSlot() const {
+    return iterator_property_feedback_slot_;
+  }
+
+  FeedbackVectorSlot IteratorCallFeedbackSlot() const {
+    return iterator_call_feedback_slot_;
+  }
+
+ private:
+  friend class AstNodeFactory;
+
+  explicit GetIterator(Expression* iterable, int pos)
+      : Expression(pos, kGetIterator), iterable_(iterable) {}
+
+  Expression* iterable_;
+  FeedbackVectorSlot iterator_property_feedback_slot_;
+  FeedbackVectorSlot iterator_call_feedback_slot_;
+};
+
 // ----------------------------------------------------------------------------
 // Basic visitor
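For readers unfamiliar with the protocol the new node models: GetIterator stands for loading the iterable's Symbol.iterator property (the LOAD_IC slot), calling it (the CALL_IC slot), and type-checking the result. A hedged sketch of those three steps in plain C++, with no V8 API; ValueSketch and iterator_method are illustrative stand-ins:

    #include <stdexcept>

    // Hedged sketch (plain C++, not V8 code) of the spec steps the node models.
    struct ValueSketch {
      bool is_object = false;
      // Stand-in for the iterable's Symbol.iterator method; looking it up
      // corresponds to the LOAD_IC slot reserved above.
      ValueSketch (*iterator_method)(const ValueSketch& receiver) = nullptr;
    };

    // Invoking the method corresponds to the CALL_IC slot; the final branch
    // is the type check the class comment mentions.
    inline ValueSketch GetIteratorSketch(const ValueSketch& iterable) {
      ValueSketch result = iterable.iterator_method(iterable);
      if (!result.is_object) {
        throw std::runtime_error("Result of Symbol.iterator is not an object");
      }
      return result;
    }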
@@ -3217,15 +3251,6 @@ class AstNodeFactory final BASE_EMBEDDED {
         try_block, scope, variable, catch_block, HandlerTable::UNCAUGHT, pos);
   }

-  TryCatchStatement* NewTryCatchStatementForPromiseReject(Block* try_block,
-                                                          Scope* scope,
-                                                          Variable* variable,
-                                                          Block* catch_block,
-                                                          int pos) {
-    return new (zone_) TryCatchStatement(
-        try_block, scope, variable, catch_block, HandlerTable::PROMISE, pos);
-  }
-
   TryCatchStatement* NewTryCatchStatementForDesugaring(Block* try_block,
                                                        Scope* scope,
                                                        Variable* variable,
@@ -3258,9 +3283,9 @@ class AstNodeFactory final BASE_EMBEDDED {
     return new (zone_) EmptyStatement(pos);
   }

-  SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement(Scope* scope) {
-    return new (zone_) SloppyBlockFunctionStatement(
-        NewEmptyStatement(kNoSourcePosition), scope);
+  SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement() {
+    return new (zone_)
+        SloppyBlockFunctionStatement(NewEmptyStatement(kNoSourcePosition));
   }

   CaseClause* NewCaseClause(
@@ -3437,9 +3462,13 @@ class AstNodeFactory final BASE_EMBEDDED {
                             Expression* value,
                             int pos) {
     DCHECK(Token::IsAssignmentOp(op));
+
+    if (op != Token::INIT && target->IsVariableProxy()) {
+      target->AsVariableProxy()->set_is_assigned();
+    }
+
     Assignment* assign = new (zone_) Assignment(op, target, value, pos);
     if (assign->is_compound()) {
-      DCHECK(Token::IsAssignmentOp(op));
       assign->binary_operation_ =
           NewBinaryOperation(assign->binary_op(), target, value, pos + 1);
     }
@@ -3463,12 +3492,12 @@ class AstNodeFactory final BASE_EMBEDDED {
       FunctionLiteral::ParameterFlag has_duplicate_parameters,
       FunctionLiteral::FunctionType function_type,
       FunctionLiteral::EagerCompileHint eager_compile_hint, int position,
-      bool has_braces) {
+      bool has_braces, int function_literal_id) {
     return new (zone_) FunctionLiteral(
         zone_, name, ast_value_factory_, scope, body,
         materialized_literal_count, expected_property_count, parameter_count,
         function_length, function_type, has_duplicate_parameters,
-        eager_compile_hint, position, true, has_braces);
+        eager_compile_hint, position, has_braces, function_literal_id);
   }

   // Creates a FunctionLiteral representing a top-level script, the
@@ -3483,7 +3512,8 @@ class AstNodeFactory final BASE_EMBEDDED {
         body, materialized_literal_count, expected_property_count,
         parameter_count, parameter_count, FunctionLiteral::kAnonymousExpression,
         FunctionLiteral::kNoDuplicateParameters,
-        FunctionLiteral::kShouldLazyCompile, 0, false, true);
+        FunctionLiteral::kShouldLazyCompile, 0, true,
+        FunctionLiteral::kIdTypeTopLevel);
   }

   ClassLiteral::Property* NewClassLiteralProperty(
@@ -3496,9 +3526,12 @@ class AstNodeFactory final BASE_EMBEDDED {
   ClassLiteral* NewClassLiteral(VariableProxy* proxy, Expression* extends,
                                 FunctionLiteral* constructor,
                                 ZoneList<ClassLiteral::Property*>* properties,
-                                int start_position, int end_position) {
-    return new (zone_) ClassLiteral(proxy, extends, constructor, properties,
-                                    start_position, end_position);
+                                int start_position, int end_position,
+                                bool has_name_static_property,
+                                bool has_static_computed_names) {
+    return new (zone_) ClassLiteral(
+        proxy, extends, constructor, properties, start_position, end_position,
+        has_name_static_property, has_static_computed_names);
   }

   NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
@@ -3534,6 +3567,10 @@ class AstNodeFactory final BASE_EMBEDDED {
     return new (zone_) EmptyParentheses(pos);
   }

+  GetIterator* NewGetIterator(Expression* iterable, int pos) {
+    return new (zone_) GetIterator(iterable, pos);
+  }
+
   Zone* zone() const { return zone_; }
   void set_zone(Zone* zone) { zone_ = zone; }

View File

@@ -48,8 +48,8 @@ CompileTimeValue::LiteralType CompileTimeValue::GetLiteralType(
   return static_cast<LiteralType>(literal_type->value());
 }

-Handle<FixedArray> CompileTimeValue::GetElements(Handle<FixedArray> value) {
-  return Handle<FixedArray>(FixedArray::cast(value->get(kElementsSlot)));
+Handle<HeapObject> CompileTimeValue::GetElements(Handle<FixedArray> value) {
+  return Handle<HeapObject>(HeapObject::cast(value->get(kElementsSlot)));
 }

 }  // namespace internal

View File

@@ -31,8 +31,8 @@ class CompileTimeValue : public AllStatic {
   // Get the type of a compile time value returned by GetValue().
   static LiteralType GetLiteralType(Handle<FixedArray> value);

-  // Get the elements array of a compile time value returned by GetValue().
-  static Handle<FixedArray> GetElements(Handle<FixedArray> value);
+  // Get the elements of a compile time value returned by GetValue().
+  static Handle<HeapObject> GetElements(Handle<FixedArray> value);

  private:
   static const int kLiteralTypeSlot = 0;

View File

@@ -5,6 +5,8 @@
 #include "src/ast/modules.h"
 #include "src/ast/ast-value-factory.h"
 #include "src/ast/scopes.h"
+#include "src/objects-inl.h"
+#include "src/objects/module-info.h"

 namespace v8 {
 namespace internal {

View File

@@ -10,18 +10,19 @@
 #include "src/ast/scopes.h"
 #include "src/base/platform/platform.h"
 #include "src/globals.h"
+#include "src/objects-inl.h"

 namespace v8 {
 namespace internal {

-CallPrinter::CallPrinter(Isolate* isolate, bool is_builtin)
+CallPrinter::CallPrinter(Isolate* isolate, bool is_user_js)
     : builder_(isolate) {
   isolate_ = isolate;
   position_ = 0;
   num_prints_ = 0;
   found_ = false;
   done_ = false;
-  is_builtin_ = is_builtin;
+  is_user_js_ = is_user_js;
   InitializeAstVisitor(isolate);
 }
@@ -239,11 +240,11 @@ void CallPrinter::VisitArrayLiteral(ArrayLiteral* node) {

 void CallPrinter::VisitVariableProxy(VariableProxy* node) {
-  if (is_builtin_) {
-    // Variable names of builtins are meaningless due to minification.
-    Print("(var)");
-  } else {
+  if (is_user_js_) {
     PrintLiteral(node->name(), false);
+  } else {
+    // Variable names of non-user code are meaningless due to minification.
+    Print("(var)");
   }
 }
@@ -279,9 +280,9 @@ void CallPrinter::VisitProperty(Property* node) {
 void CallPrinter::VisitCall(Call* node) {
   bool was_found = !found_ && node->position() == position_;
   if (was_found) {
-    // Bail out if the error is caused by a direct call to a variable in builtin
-    // code. The variable name is meaningless due to minification.
-    if (is_builtin_ && node->expression()->IsVariableProxy()) {
+    // Bail out if the error is caused by a direct call to a variable in
+    // non-user JS code. The variable name is meaningless due to minification.
+    if (!is_user_js_ && node->expression()->IsVariableProxy()) {
       done_ = true;
       return;
     }
@@ -297,9 +298,9 @@ void CallPrinter::VisitCall(Call* node) {
 void CallPrinter::VisitCallNew(CallNew* node) {
   bool was_found = !found_ && node->position() == position_;
   if (was_found) {
-    // Bail out if the error is caused by a direct call to a variable in builtin
-    // code. The variable name is meaningless due to minification.
-    if (is_builtin_ && node->expression()->IsVariableProxy()) {
+    // Bail out if the error is caused by a direct call to a variable in
+    // non-user JS code. The variable name is meaningless due to minification.
+    if (!is_user_js_ && node->expression()->IsVariableProxy()) {
       done_ = true;
       return;
     }
@@ -370,6 +371,11 @@ void CallPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
   UNREACHABLE();
 }

+void CallPrinter::VisitGetIterator(GetIterator* node) {
+  Print("GetIterator(");
+  Find(node->iterable(), true);
+  Print(")");
+}

 void CallPrinter::VisitThisFunction(ThisFunction* node) {}
@@ -874,15 +880,16 @@ void AstPrinter::PrintTryStatement(TryStatement* node) {
     case HandlerTable::CAUGHT:
       prediction = "CAUGHT";
       break;
-    case HandlerTable::PROMISE:
-      prediction = "PROMISE";
-      break;
     case HandlerTable::DESUGARING:
       prediction = "DESUGARING";
       break;
     case HandlerTable::ASYNC_AWAIT:
       prediction = "ASYNC_AWAIT";
       break;
+    case HandlerTable::PROMISE:
+      // Catch prediction resulting in promise rejections aren't
+      // parsed by the parser.
+      UNREACHABLE();
   }
   Print(" %s\n", prediction);
 }
@@ -1019,6 +1026,9 @@ void AstPrinter::PrintObjectProperties(
       case ObjectLiteral::Property::SETTER:
         prop_kind = "SETTER";
         break;
+      case ObjectLiteral::Property::SPREAD:
+        prop_kind = "SPREAD";
+        break;
     }
     EmbeddedVector<char, 128> buf;
     SNPrintF(buf, "PROPERTY - %s", prop_kind);
@@ -1136,7 +1146,14 @@ void AstPrinter::VisitCallNew(CallNew* node) {
 void AstPrinter::VisitCallRuntime(CallRuntime* node) {
   EmbeddedVector<char, 128> buf;
-  SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
+  if (node->is_jsruntime()) {
+    SNPrintF(
+        buf, "CALL RUNTIME %s code = %p", node->debug_name(),
+        static_cast<void*>(isolate_->context()->get(node->context_index())));
+  } else {
+    SNPrintF(buf, "CALL RUNTIME %s", node->debug_name());
+  }
+
   IndentedScope indent(this, buf.start(), node->position());
   PrintArguments(node->arguments());
 }
@@ -1181,6 +1198,10 @@ void AstPrinter::VisitEmptyParentheses(EmptyParentheses* node) {
   IndentedScope indent(this, "()", node->position());
 }

+void AstPrinter::VisitGetIterator(GetIterator* node) {
+  IndentedScope indent(this, "GET-ITERATOR", node->position());
+  Visit(node->iterable());
+}

 void AstPrinter::VisitThisFunction(ThisFunction* node) {
   IndentedScope indent(this, "THIS-FUNCTION", node->position());

View File

@@ -15,7 +15,7 @@ namespace internal {

 class CallPrinter final : public AstVisitor<CallPrinter> {
  public:
-  explicit CallPrinter(Isolate* isolate, bool is_builtin);
+  explicit CallPrinter(Isolate* isolate, bool is_user_js);

   // The following routine prints the node with position |position| into a
   // string.
@@ -38,7 +38,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
   int position_;  // position of ast node to print
   bool found_;
   bool done_;
-  bool is_builtin_;
+  bool is_user_js_;

   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();

View File

@@ -9,12 +9,27 @@
 #include "src/accessors.h"
 #include "src/ast/ast.h"
 #include "src/bootstrapper.h"
+#include "src/counters.h"
 #include "src/messages.h"
+#include "src/objects-inl.h"
+#include "src/objects/module-info.h"
 #include "src/parsing/parse-info.h"

 namespace v8 {
 namespace internal {

+namespace {
+void* kDummyPreParserVariable = reinterpret_cast<void*>(0x1);
+void* kDummyPreParserLexicalVariable = reinterpret_cast<void*>(0x2);
+
+bool IsLexical(Variable* variable) {
+  if (variable == kDummyPreParserLexicalVariable) return true;
+  if (variable == kDummyPreParserVariable) return false;
+  return IsLexicalVariableMode(variable->mode());
+}
+}  // namespace
+
 // ----------------------------------------------------------------------------
 // Implementation of LocalsMap
 //
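kDummyPreParserVariable and kDummyPreParserLexicalVariable above are sentinel pointers: they are only ever compared by identity, never dereferenced, which is why IsLexical checks them before touching mode(). A self-contained sketch of the idiom (names are illustrative, not V8's):

    // Sentinel-pointer sketch: small fixed addresses stand in for "a VAR was
    // declared here" / "a lexical binding was declared here" during
    // preparsing, so no real variable object needs to be allocated.
    struct VarSketch {
      bool is_lexical;
    };

    void* const kDummyVar = reinterpret_cast<void*>(0x1);
    void* const kDummyLexical = reinterpret_cast<void*>(0x2);

    bool IsLexicalSketch(VarSketch* v) {
      // Identity checks must come first: sentinels may never be dereferenced.
      if (v == kDummyLexical) return true;
      if (v == kDummyVar) return false;
      return v->is_lexical;
    }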
@@ -49,6 +64,19 @@ Variable* VariableMap::Declare(Zone* zone, Scope* scope,
   return reinterpret_cast<Variable*>(p->value);
 }

+void VariableMap::DeclareName(Zone* zone, const AstRawString* name,
+                              VariableMode mode) {
+  Entry* p =
+      ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
+                                  ZoneAllocationPolicy(zone));
+  if (p->value == nullptr) {
+    // The variable has not been declared yet -> insert it.
+    DCHECK_EQ(name, p->key);
+    p->value =
+        mode == VAR ? kDummyPreParserVariable : kDummyPreParserLexicalVariable;
+  }
+}
+
 void VariableMap::Remove(Variable* var) {
   const AstRawString* name = var->raw_name();
   ZoneHashMap::Remove(const_cast<AstRawString*>(name), name->hash());
@@ -74,21 +102,27 @@ Variable* VariableMap::Lookup(const AstRawString* name) {
   return NULL;
 }

+void SloppyBlockFunctionMap::Delegate::set_statement(Statement* statement) {
+  if (statement_ != nullptr) {
+    statement_->set_statement(statement);
+  }
+}
+
 SloppyBlockFunctionMap::SloppyBlockFunctionMap(Zone* zone)
     : ZoneHashMap(8, ZoneAllocationPolicy(zone)) {}

-void SloppyBlockFunctionMap::Declare(Zone* zone, const AstRawString* name,
-                                     SloppyBlockFunctionStatement* stmt) {
+void SloppyBlockFunctionMap::Declare(
+    Zone* zone, const AstRawString* name,
+    SloppyBlockFunctionMap::Delegate* delegate) {
   // AstRawStrings are unambiguous, i.e., the same string is always represented
   // by the same AstRawString*.
   Entry* p =
       ZoneHashMap::LookupOrInsert(const_cast<AstRawString*>(name), name->hash(),
                                   ZoneAllocationPolicy(zone));
-  stmt->set_next(static_cast<SloppyBlockFunctionStatement*>(p->value));
-  p->value = stmt;
+  delegate->set_next(static_cast<SloppyBlockFunctionMap::Delegate*>(p->value));
+  p->value = delegate;
 }

 // ----------------------------------------------------------------------------
@@ -243,8 +277,7 @@ Scope::Scope(Zone* zone, const AstRawString* catch_variable_name,
   // Cache the catch variable, even though it's also available via the
   // scope_info, as the parser expects that a catch scope always has the catch
   // variable as first and only variable.
-  Variable* variable = Declare(zone, this, catch_variable_name, VAR,
-                               NORMAL_VARIABLE, kCreatedInitialized);
+  Variable* variable = Declare(zone, catch_variable_name, VAR);
   AllocateHeapSlot(variable);
 }
@@ -263,7 +296,14 @@ void DeclarationScope::SetDefaults() {
   arguments_ = nullptr;
   this_function_ = nullptr;
   should_eager_compile_ = false;
-  is_lazily_parsed_ = false;
+  was_lazily_parsed_ = false;
+#ifdef DEBUG
+  DeclarationScope* outer_declaration_scope =
+      outer_scope_ ? outer_scope_->GetDeclarationScope() : nullptr;
+  is_being_lazily_parsed_ =
+      outer_declaration_scope ? outer_declaration_scope->is_being_lazily_parsed_
+                              : false;
+#endif
 }

 void Scope::SetDefaults() {
@@ -305,7 +345,7 @@ bool DeclarationScope::ShouldEagerCompile() const {
 }

 void DeclarationScope::set_should_eager_compile() {
-  should_eager_compile_ = !is_lazily_parsed_;
+  should_eager_compile_ = !was_lazily_parsed_;
 }

 void DeclarationScope::set_asm_module() {
@@ -354,17 +394,16 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone,
       }
       DCHECK(!scope_info->HasOuterScopeInfo());
       break;
-    } else if (scope_info->scope_type() == FUNCTION_SCOPE ||
-               scope_info->scope_type() == EVAL_SCOPE) {
-      // TODO(neis): For an eval scope, we currently create an ordinary function
-      // context. This is wrong and needs to be fixed.
-      // https://bugs.chromium.org/p/v8/issues/detail?id=5295
+    } else if (scope_info->scope_type() == FUNCTION_SCOPE) {
       outer_scope =
           new (zone) DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info));
       if (scope_info->IsAsmFunction())
         outer_scope->AsDeclarationScope()->set_asm_function();
       if (scope_info->IsAsmModule())
         outer_scope->AsDeclarationScope()->set_asm_module();
+    } else if (scope_info->scope_type() == EVAL_SCOPE) {
+      outer_scope =
+          new (zone) DeclarationScope(zone, EVAL_SCOPE, handle(scope_info));
     } else if (scope_info->scope_type() == BLOCK_SCOPE) {
       if (scope_info->is_declaration_scope()) {
         outer_scope =
@@ -424,11 +463,21 @@ int Scope::num_parameters() const {
   return is_declaration_scope() ? AsDeclarationScope()->num_parameters() : 0;
 }

+void DeclarationScope::DeclareSloppyBlockFunction(
+    const AstRawString* name, Scope* scope,
+    SloppyBlockFunctionStatement* statement) {
+  auto* delegate =
+      new (zone()) SloppyBlockFunctionMap::Delegate(scope, statement);
+  sloppy_block_function_map_.Declare(zone(), name, delegate);
+}
+
 void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
   DCHECK(is_sloppy(language_mode()));
   DCHECK(is_function_scope() || is_eval_scope() || is_script_scope() ||
          (is_block_scope() && outer_scope()->is_function_scope()));
-  DCHECK(HasSimpleParameters() || is_block_scope());
+  DCHECK(HasSimpleParameters() || is_block_scope() || is_being_lazily_parsed_);
+  DCHECK_EQ(factory == nullptr, is_being_lazily_parsed_);
   bool has_simple_parameters = HasSimpleParameters();
   // For each variable which is used as a function declaration in a sloppy
   // block,
@@ -460,7 +509,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
     bool var_created = false;

     // Write in assignments to var for each block-scoped function declaration
-    auto delegates = static_cast<SloppyBlockFunctionStatement*>(p->value);
+    auto delegates = static_cast<SloppyBlockFunctionMap::Delegate*>(p->value);

     DeclarationScope* decl_scope = this;
     while (decl_scope->is_eval_scope()) {
@@ -468,7 +517,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
     }
     Scope* outer_scope = decl_scope->outer_scope();

-    for (SloppyBlockFunctionStatement* delegate = delegates;
+    for (SloppyBlockFunctionMap::Delegate* delegate = delegates;
          delegate != nullptr; delegate = delegate->next()) {
       // Check if there's a conflict with a lexical declaration
       Scope* query_scope = delegate->scope()->outer_scope();
@@ -482,7 +531,7 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
       // `{ let e; try {} catch (e) { function e(){} } }`
       do {
         var = query_scope->LookupLocal(name);
-        if (var != nullptr && IsLexicalVariableMode(var->mode())) {
+        if (var != nullptr && IsLexical(var)) {
           should_hoist = false;
           break;
         }
@@ -494,30 +543,39 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) {
       // Declare a var-style binding for the function in the outer scope
       if (!var_created) {
         var_created = true;
-        VariableProxy* proxy = factory->NewVariableProxy(name, NORMAL_VARIABLE);
-        Declaration* declaration =
-            factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
-        // Based on the preceding check, it doesn't matter what we pass as
-        // allow_harmony_restrictive_generators and
-        // sloppy_mode_block_scope_function_redefinition.
-        bool ok = true;
-        DeclareVariable(declaration, VAR,
-                        Variable::DefaultInitializationFlag(VAR), false,
-                        nullptr, &ok);
-        CHECK(ok);  // Based on the preceding check, this should not fail
+        if (factory) {
+          VariableProxy* proxy =
+              factory->NewVariableProxy(name, NORMAL_VARIABLE);
+          auto declaration =
+              factory->NewVariableDeclaration(proxy, this, kNoSourcePosition);
+          // Based on the preceding check, it doesn't matter what we pass as
+          // allow_harmony_restrictive_generators and
+          // sloppy_mode_block_scope_function_redefinition.
+          bool ok = true;
+          DeclareVariable(declaration, VAR,
+                          Variable::DefaultInitializationFlag(VAR), false,
+                          nullptr, &ok);
+          CHECK(ok);  // Based on the preceding check, this should not fail
+        } else {
+          DeclareVariableName(name, VAR);
+        }
       }

-      Expression* assignment = factory->NewAssignment(
-          Token::ASSIGN, NewUnresolved(factory, name),
-          delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
-      Statement* statement =
-          factory->NewExpressionStatement(assignment, kNoSourcePosition);
-      delegate->set_statement(statement);
+      if (factory) {
+        Expression* assignment = factory->NewAssignment(
+            Token::ASSIGN, NewUnresolved(factory, name),
+            delegate->scope()->NewUnresolved(factory, name), kNoSourcePosition);
+        Statement* statement =
+            factory->NewExpressionStatement(assignment, kNoSourcePosition);
+        delegate->set_statement(statement);
+      }
     }
   }
 }

 void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
+  RuntimeCallTimerScope runtimeTimer(info->isolate(),
+                                     &RuntimeCallStats::CompileScopeAnalysis);
   DCHECK(info->literal() != NULL);
   DeclarationScope* scope = info->literal()->scope();
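The HoistSloppyBlockFunctions hunk above threads `factory == nullptr` through the hoisting logic: with a factory (full parse) it synthesizes real declarations and assignment statements, and without one (lazy preparse) it only records variable names via DeclareVariableName. A hedged sketch of that two-mode pattern (types here are illustrative, not V8's):

    #include <string>
    #include <vector>

    // "Factory may be null" sketch: one traversal serves both the full parser
    // (build AST nodes) and the preparser (record names only).
    struct AstFactorySketch {
      std::vector<std::string> declarations;
      void NewDeclaration(const std::string& name) {
        declarations.push_back(name);
      }
    };

    struct ScopeSketch {
      std::vector<std::string> declared_names;

      void HoistOne(const std::string& name, AstFactorySketch* factory) {
        if (factory) {
          factory->NewDeclaration(name);   // full parse: materialize the AST
        } else {
          declared_names.push_back(name);  // preparse: bookkeeping only
        }
      }
    };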
@@ -542,13 +600,15 @@ void DeclarationScope::Analyze(ParseInfo* info, AnalyzeMode mode) {
     scope->HoistSloppyBlockFunctions(&factory);
   }

-  // We are compiling one of three cases:
+  // We are compiling one of four cases:
   // 1) top-level code,
   // 2) a function/eval/module on the top-level
   // 3) a function/eval in a scope that was already resolved.
+  // 4) an asm.js function
   DCHECK(scope->scope_type() == SCRIPT_SCOPE ||
          scope->outer_scope()->scope_type() == SCRIPT_SCOPE ||
-         scope->outer_scope()->already_resolved_);
+         scope->outer_scope()->already_resolved_ ||
+         (info->asm_function_scope() && scope->is_function_scope()));

   // The outer scope is never lazy.
   scope->set_should_eager_compile();
@@ -577,11 +637,11 @@ void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) {
   DCHECK(is_declaration_scope());
   DCHECK(has_this_declaration());

-  bool subclass_constructor = IsSubclassConstructor(function_kind_);
-  Variable* var = Declare(
-      zone(), this, ast_value_factory->this_string(),
-      subclass_constructor ? CONST : VAR, THIS_VARIABLE,
-      subclass_constructor ? kNeedsInitialization : kCreatedInitialized);
+  bool derived_constructor = IsDerivedConstructor(function_kind_);
+  Variable* var =
+      Declare(zone(), ast_value_factory->this_string(),
+              derived_constructor ? CONST : VAR, THIS_VARIABLE,
+              derived_constructor ? kNeedsInitialization : kCreatedInitialized);
   receiver_ = var;
 }
@@ -594,8 +654,7 @@ void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) {
     // Declare 'arguments' variable which exists in all non arrow functions.
     // Note that it might never be accessed, in which case it won't be
    // allocated during variable allocation.
-    arguments_ = Declare(zone(), this, ast_value_factory->arguments_string(),
-                         VAR, NORMAL_VARIABLE, kCreatedInitialized);
+    arguments_ = Declare(zone(), ast_value_factory->arguments_string(), VAR);
   } else if (IsLexicalVariableMode(arguments_->mode())) {
     // Check if there's lexically declared variable named arguments to avoid
     // redeclaration. See ES#sec-functiondeclarationinstantiation, step 20.
@@ -609,14 +668,12 @@ void DeclarationScope::DeclareDefaultFunctionVariables(
   DCHECK(!is_arrow_scope());

   DeclareThis(ast_value_factory);
-  new_target_ = Declare(zone(), this, ast_value_factory->new_target_string(),
-                        CONST, NORMAL_VARIABLE, kCreatedInitialized);
+  new_target_ = Declare(zone(), ast_value_factory->new_target_string(), CONST);

   if (IsConciseMethod(function_kind_) || IsClassConstructor(function_kind_) ||
       IsAccessorFunction(function_kind_)) {
     this_function_ =
-        Declare(zone(), this, ast_value_factory->this_function_string(), CONST,
-                NORMAL_VARIABLE, kCreatedInitialized);
+        Declare(zone(), ast_value_factory->this_function_string(), CONST);
   }
 }
@@ -637,23 +694,12 @@ Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name) {
 }

 bool Scope::HasBeenRemoved() const {
-  // TODO(neis): Store this information somewhere instead of calculating it.
-
-  if (!is_block_scope()) return false;  // Shortcut.
-
-  Scope* parent = outer_scope();
-  if (parent == nullptr) {
-    DCHECK(is_script_scope());
-    return false;
-  }
-
-  Scope* sibling = parent->inner_scope();
-  for (; sibling != nullptr; sibling = sibling->sibling()) {
-    if (sibling == this) return false;
-  }
-  DCHECK_NULL(inner_scope_);
-  return true;
+  if (sibling() == this) {
+    DCHECK_NULL(inner_scope_);
+    DCHECK(is_block_scope());
+    return true;
+  }
+  return false;
 }

 Scope* Scope::GetUnremovedScope() {
@@ -667,6 +713,7 @@ Scope* Scope::GetUnremovedScope() {

 Scope* Scope::FinalizeBlockScope() {
   DCHECK(is_block_scope());
+  DCHECK(!HasBeenRemoved());

   if (variables_.occupancy() > 0 ||
       (is_declaration_scope() && calls_sloppy_eval())) {
@@ -705,7 +752,12 @@ Scope* Scope::FinalizeBlockScope() {
   PropagateUsageFlagsToScope(outer_scope_);
   // This block does not need a context.
   num_heap_slots_ = 0;
-  return NULL;
+
+  // Mark scope as removed by making it its own sibling.
+  sibling_ = this;
+  DCHECK(HasBeenRemoved());
+
+  return nullptr;
 }

 void DeclarationScope::AddLocal(Variable* var) {
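FinalizeBlockScope above marks a removed scope by linking it as its own sibling, which turns Scope::HasBeenRemoved from a walk over the parent's inner-scope list into a single pointer comparison. A self-contained sketch of the trick (illustrative names):

    #include <cassert>

    // Self-link sketch: a node removed from its parent's sibling list is
    // marked by pointing sibling_ at itself, so the "am I removed?" test is
    // one identity comparison instead of a list traversal.
    struct ScopeNodeSketch {
      ScopeNodeSketch* sibling_ = nullptr;

      void MarkRemoved() { sibling_ = this; }
      bool HasBeenRemoved() const { return sibling_ == this; }
    };

    int main() {
      ScopeNodeSketch scope;
      assert(!scope.HasBeenRemoved());
      scope.MarkRemoved();
      assert(scope.HasBeenRemoved());
      return 0;
    }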
@ -715,13 +767,13 @@ void DeclarationScope::AddLocal(Variable* var) {
locals_.Add(var); locals_.Add(var);
} }
Variable* Scope::Declare(Zone* zone, Scope* scope, const AstRawString* name, Variable* Scope::Declare(Zone* zone, const AstRawString* name,
VariableMode mode, VariableKind kind, VariableMode mode, VariableKind kind,
InitializationFlag initialization_flag, InitializationFlag initialization_flag,
MaybeAssignedFlag maybe_assigned_flag) { MaybeAssignedFlag maybe_assigned_flag) {
bool added; bool added;
Variable* var = Variable* var =
variables_.Declare(zone, scope, name, mode, kind, initialization_flag, variables_.Declare(zone, this, name, mode, kind, initialization_flag,
maybe_assigned_flag, &added); maybe_assigned_flag, &added);
if (added) locals_.Add(var); if (added) locals_.Add(var);
return var; return var;
@ -796,6 +848,7 @@ void Scope::PropagateUsageFlagsToScope(Scope* other) {
DCHECK(!already_resolved_); DCHECK(!already_resolved_);
DCHECK(!other->already_resolved_); DCHECK(!other->already_resolved_);
if (calls_eval()) other->RecordEvalCall(); if (calls_eval()) other->RecordEvalCall();
if (inner_scope_calls_eval_) other->inner_scope_calls_eval_ = true;
} }
Variable* Scope::LookupInScopeInfo(const AstRawString* name) { Variable* Scope::LookupInScopeInfo(const AstRawString* name) {
@ -869,12 +922,13 @@ Variable* DeclarationScope::DeclareParameter(
DCHECK(is_function_scope() || is_module_scope()); DCHECK(is_function_scope() || is_module_scope());
DCHECK(!has_rest_); DCHECK(!has_rest_);
DCHECK(!is_optional || !is_rest); DCHECK(!is_optional || !is_rest);
DCHECK(!is_being_lazily_parsed_);
DCHECK(!was_lazily_parsed_);
Variable* var; Variable* var;
if (mode == TEMPORARY) { if (mode == TEMPORARY) {
var = NewTemporary(name); var = NewTemporary(name);
} else { } else {
var = var = Declare(zone(), name, mode);
Declare(zone(), this, name, mode, NORMAL_VARIABLE, kCreatedInitialized);
// TODO(wingo): Avoid O(n^2) check. // TODO(wingo): Avoid O(n^2) check.
*is_duplicate = IsDeclaredParameter(name); *is_duplicate = IsDeclaredParameter(name);
} }
@ -894,8 +948,9 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
// introduced during variable allocation, and TEMPORARY variables are // introduced during variable allocation, and TEMPORARY variables are
// allocated via NewTemporary(). // allocated via NewTemporary().
DCHECK(IsDeclaredVariableMode(mode)); DCHECK(IsDeclaredVariableMode(mode));
return Declare(zone(), this, name, mode, kind, init_flag, DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
maybe_assigned_flag); DCHECK(!GetDeclarationScope()->was_lazily_parsed());
return Declare(zone(), name, mode, kind, init_flag, maybe_assigned_flag);
} }
Variable* Scope::DeclareVariable( Variable* Scope::DeclareVariable(
@ -904,6 +959,8 @@ Variable* Scope::DeclareVariable(
bool* sloppy_mode_block_scope_function_redefinition, bool* ok) { bool* sloppy_mode_block_scope_function_redefinition, bool* ok) {
DCHECK(IsDeclaredVariableMode(mode)); DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_); DCHECK(!already_resolved_);
DCHECK(!GetDeclarationScope()->is_being_lazily_parsed());
DCHECK(!GetDeclarationScope()->was_lazily_parsed());
if (mode == VAR && !is_declaration_scope()) { if (mode == VAR && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariable( return GetDeclarationScope()->DeclareVariable(
@ -1002,6 +1059,28 @@ Variable* Scope::DeclareVariable(
return var; return var;
} }
void Scope::DeclareVariableName(const AstRawString* name, VariableMode mode) {
DCHECK(IsDeclaredVariableMode(mode));
DCHECK(!already_resolved_);
DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
if (mode == VAR && !is_declaration_scope()) {
return GetDeclarationScope()->DeclareVariableName(name, mode);
}
DCHECK(!is_with_scope());
DCHECK(!is_eval_scope());
// Unlike DeclareVariable, DeclareVariableName allows declaring variables in
// catch scopes: Parser::RewriteCatchPattern bypasses DeclareVariable by
// calling DeclareLocal directly, and it doesn't make sense to add a similar
// bypass mechanism for PreParser.
DCHECK(is_declaration_scope() || (IsLexicalVariableMode(mode) &&
(is_block_scope() || is_catch_scope())));
DCHECK(scope_info_.is_null());
// Declare the variable in the declaration scope.
variables_.DeclareName(zone(), name, mode);
}
VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory, VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
const AstRawString* name, const AstRawString* name,
int start_position, VariableKind kind) { int start_position, VariableKind kind) {
@ -1009,7 +1088,7 @@ VariableProxy* Scope::NewUnresolved(AstNodeFactory* factory,
// the same name because they may be removed selectively via // the same name because they may be removed selectively via
// RemoveUnresolved(). // RemoveUnresolved().
DCHECK(!already_resolved_); DCHECK(!already_resolved_);
DCHECK_EQ(!needs_migration_, factory->zone() == zone()); DCHECK_EQ(factory->zone(), zone());
VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_position); VariableProxy* proxy = factory->NewVariableProxy(name, kind, start_position);
proxy->set_next_unresolved(unresolved_); proxy->set_next_unresolved(unresolved_);
unresolved_ = proxy; unresolved_ = proxy;
@ -1026,8 +1105,7 @@ void Scope::AddUnresolved(VariableProxy* proxy) {
Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name, Variable* DeclarationScope::DeclareDynamicGlobal(const AstRawString* name,
VariableKind kind) { VariableKind kind) {
DCHECK(is_script_scope()); DCHECK(is_script_scope());
return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind, return variables_.Declare(zone(), this, name, DYNAMIC_GLOBAL, kind);
kCreatedInitialized);
} }
@ -1050,26 +1128,6 @@ bool Scope::RemoveUnresolved(VariableProxy* var) {
return false; return false;
} }
bool Scope::RemoveUnresolved(const AstRawString* name) {
if (unresolved_ != nullptr && unresolved_->raw_name() == name) {
VariableProxy* removed = unresolved_;
unresolved_ = unresolved_->next_unresolved();
removed->set_next_unresolved(nullptr);
return true;
}
VariableProxy* current = unresolved_;
while (current != nullptr) {
VariableProxy* next = current->next_unresolved();
if (next != nullptr && next->raw_name() == name) {
current->set_next_unresolved(next->next_unresolved());
next->set_next_unresolved(nullptr);
return true;
}
current = next;
}
return false;
}
Variable* Scope::NewTemporary(const AstRawString* name) { Variable* Scope::NewTemporary(const AstRawString* name) {
DeclarationScope* scope = GetClosureScope(); DeclarationScope* scope = GetClosureScope();
Variable* var = new (zone()) Variable* var = new (zone())
@ -1157,9 +1215,9 @@ bool Scope::AllowsLazyParsingWithoutUnresolvedVariables(
// guaranteed to be correct. // guaranteed to be correct.
for (const Scope* s = this; s != outer; s = s->outer_scope_) { for (const Scope* s = this; s != outer; s = s->outer_scope_) {
// Eval forces context allocation on all outer scopes, so we don't need to // Eval forces context allocation on all outer scopes, so we don't need to
// look at those scopes. Sloppy eval makes all top-level variables dynamic, // look at those scopes. Sloppy eval makes top-level non-lexical variables
// whereas strict-mode requires context allocation. // dynamic, whereas strict-mode requires context allocation.
if (s->is_eval_scope()) return !is_strict(s->language_mode()); if (s->is_eval_scope()) return is_sloppy(s->language_mode());
// Catch scopes force context allocation of all variables. // Catch scopes force context allocation of all variables.
if (s->is_catch_scope()) continue; if (s->is_catch_scope()) continue;
// With scopes do not introduce variables that need allocation. // With scopes do not introduce variables that need allocation.
@ -1276,7 +1334,7 @@ Scope* Scope::GetOuterScopeWithContext() {
Handle<StringSet> DeclarationScope::CollectNonLocals( Handle<StringSet> DeclarationScope::CollectNonLocals(
ParseInfo* info, Handle<StringSet> non_locals) { ParseInfo* info, Handle<StringSet> non_locals) {
VariableProxy* free_variables = FetchFreeVariables(this, true, info); VariableProxy* free_variables = FetchFreeVariables(this, info);
for (VariableProxy* proxy = free_variables; proxy != nullptr; for (VariableProxy* proxy = free_variables; proxy != nullptr;
proxy = proxy->next_unresolved()) { proxy = proxy->next_unresolved()) {
non_locals = StringSet::Add(non_locals, proxy->name()); non_locals = StringSet::Add(non_locals, proxy->name());
@ -1292,21 +1350,30 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory,
params_.Clear(); params_.Clear();
decls_.Clear(); decls_.Clear();
locals_.Clear(); locals_.Clear();
sloppy_block_function_map_.Clear();
variables_.Clear();
// Make sure we won't walk the scope tree from here on.
inner_scope_ = nullptr; inner_scope_ = nullptr;
unresolved_ = nullptr; unresolved_ = nullptr;
if (aborted && !IsArrowFunction(function_kind_)) { if (aborted) {
DeclareDefaultFunctionVariables(ast_value_factory); // Prepare scope for use in the outer zone.
zone_ = ast_value_factory->zone();
variables_.Reset(ZoneAllocationPolicy(zone_));
sloppy_block_function_map_.Reset(ZoneAllocationPolicy(zone_));
if (!IsArrowFunction(function_kind_)) {
DeclareDefaultFunctionVariables(ast_value_factory);
}
} else {
// Make sure this scope isn't used for allocation anymore.
zone_ = nullptr;
variables_.Invalidate();
sloppy_block_function_map_.Invalidate();
} }
#ifdef DEBUG #ifdef DEBUG
needs_migration_ = false; needs_migration_ = false;
is_being_lazily_parsed_ = false;
#endif #endif
is_lazily_parsed_ = !aborted; was_lazily_parsed_ = !aborted;
} }
void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) { void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
@ -1317,9 +1384,8 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
// Try to resolve unresolved variables for this Scope and migrate those // Try to resolve unresolved variables for this Scope and migrate those
// which cannot be resolved inside. It doesn't make sense to try to resolve // which cannot be resolved inside. It doesn't make sense to try to resolve
// them in the outer Scopes here, because they are incomplete. // them in the outer Scopes here, because they are incomplete.
for (VariableProxy* proxy = for (VariableProxy* proxy = FetchFreeVariables(this); proxy != nullptr;
FetchFreeVariables(this, !FLAG_lazy_inner_functions); proxy = proxy->next_unresolved()) {
proxy != nullptr; proxy = proxy->next_unresolved()) {
DCHECK(!proxy->is_resolved()); DCHECK(!proxy->is_resolved());
VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy); VariableProxy* copy = ast_node_factory->CopyVariableProxy(proxy);
copy->set_next_unresolved(unresolved); copy->set_next_unresolved(unresolved);
@ -1339,8 +1405,10 @@ void DeclarationScope::AnalyzePartially(AstNodeFactory* ast_node_factory) {
} }
#ifdef DEBUG #ifdef DEBUG
static const char* Header(ScopeType scope_type, FunctionKind function_kind, namespace {
bool is_declaration_scope) {
const char* Header(ScopeType scope_type, FunctionKind function_kind,
bool is_declaration_scope) {
switch (scope_type) { switch (scope_type) {
case EVAL_SCOPE: return "eval"; case EVAL_SCOPE: return "eval";
// TODO(adamk): Should we print concise method scopes specially? // TODO(adamk): Should we print concise method scopes specially?
@@ -1359,18 +1427,13 @@ static const char* Header(ScopeType scope_type, FunctionKind function_kind,
   return NULL;
 }
 
-
-static void Indent(int n, const char* str) {
-  PrintF("%*s%s", n, "", str);
-}
-
+void Indent(int n, const char* str) { PrintF("%*s%s", n, "", str); }
 
-static void PrintName(const AstRawString* name) {
+void PrintName(const AstRawString* name) {
   PrintF("%.*s", name->length(), name->raw_data());
 }
 
-
-static void PrintLocation(Variable* var) {
+void PrintLocation(Variable* var) {
   switch (var->location()) {
     case VariableLocation::UNALLOCATED:
       break;
@@ -1392,45 +1455,48 @@ static void PrintLocation(Variable* var) {
   }
 }
 
-
-static void PrintVar(int indent, Variable* var) {
-  if (var->is_used() || !var->IsUnallocated()) {
-    Indent(indent, VariableMode2String(var->mode()));
-    PrintF(" ");
-    if (var->raw_name()->IsEmpty())
-      PrintF(".%p", reinterpret_cast<void*>(var));
-    else
-      PrintName(var->raw_name());
-    PrintF("; // ");
-    PrintLocation(var);
-    bool comma = !var->IsUnallocated();
-    if (var->has_forced_context_allocation()) {
-      if (comma) PrintF(", ");
-      PrintF("forced context allocation");
-      comma = true;
-    }
-    if (var->maybe_assigned() == kNotAssigned) {
-      if (comma) PrintF(", ");
-      PrintF("never assigned");
-    }
-    PrintF("\n");
+void PrintVar(int indent, Variable* var) {
+  Indent(indent, VariableMode2String(var->mode()));
+  PrintF(" ");
+  if (var->raw_name()->IsEmpty())
+    PrintF(".%p", reinterpret_cast<void*>(var));
+  else
+    PrintName(var->raw_name());
+  PrintF("; // ");
+  PrintLocation(var);
+  bool comma = !var->IsUnallocated();
+  if (var->has_forced_context_allocation()) {
+    if (comma) PrintF(", ");
+    PrintF("forced context allocation");
+    comma = true;
+  }
+  if (var->maybe_assigned() == kNotAssigned) {
+    if (comma) PrintF(", ");
+    PrintF("never assigned");
   }
+  PrintF("\n");
 }
 
-static void PrintMap(int indent, VariableMap* map, bool locals) {
+void PrintMap(int indent, const char* label, VariableMap* map, bool locals,
+              Variable* function_var) {
+  bool printed_label = false;
   for (VariableMap::Entry* p = map->Start(); p != nullptr; p = map->Next(p)) {
     Variable* var = reinterpret_cast<Variable*>(p->value);
+    if (var == function_var) continue;
     bool local = !IsDynamicVariableMode(var->mode());
-    if (locals ? local : !local) {
-      if (var == nullptr) {
-        Indent(indent, "<?>\n");
-      } else {
-        PrintVar(indent, var);
-      }
+    if ((locals ? local : !local) &&
+        (var->is_used() || !var->IsUnallocated())) {
+      if (!printed_label) {
+        Indent(indent, label);
+        printed_label = true;
+      }
+      PrintVar(indent, var);
     }
   }
 }
 
+}  // anonymous namespace
+
 void DeclarationScope::PrintParameters() {
   PrintF(" (");
   for (int i = 0; i < params_.length(); i++) {
@@ -1487,9 +1553,12 @@ void Scope::Print(int n) {
   if (inner_scope_calls_eval_) Indent(n1, "// inner scope calls 'eval'\n");
   if (is_declaration_scope()) {
     DeclarationScope* scope = AsDeclarationScope();
-    if (scope->is_lazily_parsed()) Indent(n1, "// lazily parsed\n");
+    if (scope->was_lazily_parsed()) Indent(n1, "// lazily parsed\n");
     if (scope->ShouldEagerCompile()) Indent(n1, "// will be compiled\n");
   }
+  if (has_forced_context_allocation()) {
+    Indent(n1, "// forces context allocation\n");
+  }
   if (num_stack_slots_ > 0) {
     Indent(n1, "// ");
     PrintF("%d stack slots\n", num_stack_slots_);
@@ -1505,12 +1574,22 @@ void Scope::Print(int n) {
     PrintVar(n1, function);
   }
 
-  if (variables_.Start() != NULL) {
-    Indent(n1, "// local vars:\n");
-    PrintMap(n1, &variables_, true);
+  // Print temporaries.
+  {
+    bool printed_header = false;
+    for (Variable* local : locals_) {
+      if (local->mode() != TEMPORARY) continue;
+      if (!printed_header) {
+        printed_header = true;
+        Indent(n1, "// temporary vars:\n");
+      }
+      PrintVar(n1, local);
+    }
+  }
 
-    Indent(n1, "// dynamic vars:\n");
-    PrintMap(n1, &variables_, false);
+  if (variables_.occupancy() > 0) {
+    PrintMap(n1, "// local vars:\n", &variables_, true, function);
+    PrintMap(n1, "// dynamic vars:\n", &variables_, false, function);
   }
 
   // Print inner scopes (disable by providing negative n).
@@ -1539,6 +1618,12 @@ void Scope::CheckScopePositions() {
 void Scope::CheckZones() {
   DCHECK(!needs_migration_);
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
+    if (scope->is_declaration_scope() &&
+        scope->AsDeclarationScope()->was_lazily_parsed()) {
+      DCHECK_NULL(scope->zone());
+      DCHECK_NULL(scope->inner_scope_);
+      continue;
+    }
     CHECK_EQ(scope->zone(), zone());
     scope->CheckZones();
   }
@@ -1548,8 +1633,7 @@ void Scope::CheckZones() {
 Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
   // Declare a new non-local.
   DCHECK(IsDynamicVariableMode(mode));
-  Variable* var = variables_.Declare(zone(), NULL, name, mode, NORMAL_VARIABLE,
-                                     kCreatedInitialized);
+  Variable* var = variables_.Declare(zone(), nullptr, name, mode);
   // Allocate it by giving it a dynamic lookup.
   var->AllocateTo(VariableLocation::LOOKUP, -1);
   return var;
@@ -1590,6 +1674,13 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy, Scope* outer_scope_end) {
   // The variable could not be resolved statically.
   if (var == nullptr) return var;
 
+  // TODO(marja): Separate LookupRecursive for preparsed scopes better.
+  if (var == kDummyPreParserVariable || var == kDummyPreParserLexicalVariable) {
+    DCHECK(GetDeclarationScope()->is_being_lazily_parsed());
+    DCHECK(FLAG_lazy_inner_functions);
+    return var;
+  }
+
   if (is_function_scope() && !var->is_dynamic()) {
     var->ForceContextAllocation();
   }
@@ -1641,34 +1732,20 @@ void Scope::ResolveVariable(ParseInfo* info, VariableProxy* proxy) {
   DCHECK(!proxy->is_resolved());
   Variable* var = LookupRecursive(proxy, nullptr);
   ResolveTo(info, proxy, var);
-
-  if (FLAG_lazy_inner_functions) {
-    if (info != nullptr && info->is_native()) return;
-    // Pessimistically force context allocation for all variables to which inner
-    // scope variables could potentially resolve to.
-    Scope* scope = GetClosureScope()->outer_scope_;
-    while (scope != nullptr && scope->scope_info_.is_null()) {
-      var = scope->LookupLocal(proxy->raw_name());
-      if (var != nullptr) {
-        // Since we don't lazy parse inner arrow functions, inner functions
-        // cannot refer to the outer "this".
-        if (!var->is_dynamic() && !var->is_this() &&
-            !var->has_forced_context_allocation()) {
-          var->ForceContextAllocation();
-          var->set_is_used();
-          // We don't know what the (potentially lazy parsed) inner function
-          // does with the variable; pessimistically assume that it's assigned.
-          var->set_maybe_assigned();
-        }
-      }
-      scope = scope->outer_scope_;
-    }
-  }
 }
 
 namespace {
 
 bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
+  if (var->mode() == DYNAMIC_LOCAL) {
+    // Dynamically introduced variables never need a hole check (since they're
+    // VAR bindings, either from var or function declarations), but the variable
+    // they shadow might need a hole check, which we want to do if we decide
+    // that no shadowing variable was dynamically introduced.
+    DCHECK(!var->binding_needs_init());
+    return AccessNeedsHoleCheck(var->local_if_not_shadowed(), proxy, scope);
+  }
+
   if (!var->binding_needs_init()) {
     return false;
   }
@@ -1703,8 +1780,7 @@ bool AccessNeedsHoleCheck(Variable* var, VariableProxy* proxy, Scope* scope) {
   }
 
   if (var->is_this()) {
-    DCHECK(
-        IsSubclassConstructor(scope->GetDeclarationScope()->function_kind()));
+    DCHECK(IsDerivedConstructor(scope->GetDeclarationScope()->function_kind()));
     // TODO(littledan): implement 'this' hole check elimination.
     return true;
   }
@@ -1749,37 +1825,65 @@ void Scope::ResolveTo(ParseInfo* info, VariableProxy* proxy, Variable* var) {
 
 void Scope::ResolveVariablesRecursively(ParseInfo* info) {
   DCHECK(info->script_scope()->is_script_scope());
 
-  // Resolve unresolved variables for this scope.
-  for (VariableProxy* proxy = unresolved_; proxy != nullptr;
-       proxy = proxy->next_unresolved()) {
-    ResolveVariable(info, proxy);
-  }
+  // Lazy parsed declaration scopes are already partially analyzed. If there are
+  // unresolved references remaining, they just need to be resolved in outer
+  // scopes.
+  if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
+    DCHECK(variables_.occupancy() == 0);
+    for (VariableProxy* proxy = unresolved_; proxy != nullptr;
+         proxy = proxy->next_unresolved()) {
+      Variable* var = outer_scope()->LookupRecursive(proxy, nullptr);
+      if (!var->is_dynamic()) {
+        var->set_is_used();
+        var->ForceContextAllocation();
+        if (proxy->is_assigned()) var->set_maybe_assigned();
+      }
+    }
+  } else {
+    // Resolve unresolved variables for this scope.
+    for (VariableProxy* proxy = unresolved_; proxy != nullptr;
+         proxy = proxy->next_unresolved()) {
+      ResolveVariable(info, proxy);
+    }
 
-  // Resolve unresolved variables for inner scopes.
-  for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
-    scope->ResolveVariablesRecursively(info);
+    // Resolve unresolved variables for inner scopes.
+    for (Scope* scope = inner_scope_; scope != nullptr;
+         scope = scope->sibling_) {
+      scope->ResolveVariablesRecursively(info);
+    }
   }
 }
 
 VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
-                                         bool try_to_resolve, ParseInfo* info,
+                                         ParseInfo* info,
                                          VariableProxy* stack) {
+  // Lazy parsed declaration scopes are already partially analyzed. If there are
+  // unresolved references remaining, they just need to be resolved in outer
+  // scopes.
+  Scope* lookup =
+      is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()
+          ? outer_scope()
+          : this;
   for (VariableProxy *proxy = unresolved_, *next = nullptr; proxy != nullptr;
        proxy = next) {
     next = proxy->next_unresolved();
     DCHECK(!proxy->is_resolved());
-    Variable* var = nullptr;
-    if (try_to_resolve) {
-      var = LookupRecursive(proxy, max_outer_scope->outer_scope());
-    }
+    Variable* var =
+        lookup->LookupRecursive(proxy, max_outer_scope->outer_scope());
     if (var == nullptr) {
       proxy->set_next_unresolved(stack);
       stack = proxy;
-    } else if (info != nullptr) {
-      ResolveTo(info, proxy, var);
-    } else {
-      var->set_is_used();
+    } else if (var != kDummyPreParserVariable &&
+               var != kDummyPreParserLexicalVariable) {
+      if (info != nullptr) {
+        // In this case we need to leave scopes in a way that they can be
+        // allocated. If we resolved variables from lazy parsed scopes, we need
+        // to context allocate the var.
+        ResolveTo(info, proxy, var);
+        if (!var->is_dynamic() && lookup != this) var->ForceContextAllocation();
+      } else {
+        var->set_is_used();
+      }
     }
   }
 
@@ -1787,8 +1891,7 @@ VariableProxy* Scope::FetchFreeVariables(DeclarationScope* max_outer_scope,
   unresolved_ = nullptr;
 
   for (Scope* scope = inner_scope_; scope != nullptr; scope = scope->sibling_) {
-    stack =
-        scope->FetchFreeVariables(max_outer_scope, try_to_resolve, info, stack);
+    stack = scope->FetchFreeVariables(max_outer_scope, info, stack);
   }
 
   return stack;
@@ -1823,7 +1926,10 @@ bool Scope::MustAllocateInContext(Variable* var) {
   if (has_forced_context_allocation()) return true;
   if (var->mode() == TEMPORARY) return false;
   if (is_catch_scope()) return true;
-  if (is_script_scope() && IsLexicalVariableMode(var->mode())) return true;
+  if ((is_script_scope() || is_eval_scope()) &&
+      IsLexicalVariableMode(var->mode())) {
+    return true;
+  }
   return var->has_forced_context_allocation() || inner_scope_calls_eval_;
 }
 
@@ -1880,6 +1986,7 @@ void DeclarationScope::AllocateParameterLocals() {
     DCHECK_EQ(this, var->scope());
     if (uses_sloppy_arguments) {
      var->set_is_used();
+      var->set_maybe_assigned();
       var->ForceContextAllocation();
     }
     AllocateParameter(var, i);
@@ -1969,7 +2076,7 @@ void Scope::AllocateVariablesRecursively() {
   DCHECK(!already_resolved_);
   DCHECK_EQ(0, num_stack_slots_);
 
   // Don't allocate variables of preparsed scopes.
-  if (is_declaration_scope() && AsDeclarationScope()->is_lazily_parsed()) {
+  if (is_declaration_scope() && AsDeclarationScope()->was_lazily_parsed()) {
     return;
   }
@@ -1994,9 +2101,9 @@ void Scope::AllocateVariablesRecursively() {
   // Force allocation of a context for this scope if necessary. For a 'with'
   // scope and for a function scope that makes an 'eval' call we need a context,
   // even if no local variables were statically allocated in the scope.
-  // Likewise for modules.
+  // Likewise for modules and function scopes representing asm.js modules.
   bool must_have_context =
-      is_with_scope() || is_module_scope() ||
+      is_with_scope() || is_module_scope() || IsAsmModule() ||
       (is_function_scope() && calls_sloppy_eval()) ||
       (is_block_scope() && is_declaration_scope() && calls_sloppy_eval());

View File

@@ -9,6 +9,7 @@
 #include "src/base/hashmap.h"
 #include "src/globals.h"
 #include "src/objects.h"
+#include "src/objects/scope-info.h"
 #include "src/zone/zone.h"
 
 namespace v8 {
@@ -20,6 +21,7 @@ class AstRawString;
 class Declaration;
 class ParseInfo;
 class SloppyBlockFunctionStatement;
+class Statement;
 class StringSet;
 class VariableProxy;
 
@@ -28,11 +30,16 @@ class VariableMap: public ZoneHashMap {
  public:
   explicit VariableMap(Zone* zone);
 
-  Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
-                    VariableMode mode, VariableKind kind,
-                    InitializationFlag initialization_flag,
-                    MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
-                    bool* added = nullptr);
+  Variable* Declare(
+      Zone* zone, Scope* scope, const AstRawString* name, VariableMode mode,
+      VariableKind kind = NORMAL_VARIABLE,
+      InitializationFlag initialization_flag = kCreatedInitialized,
+      MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
+      bool* added = nullptr);
+
+  // Records that "name" exists (if not recorded yet) but doesn't create a
+  // Variable. Useful for preparsing.
+  void DeclareName(Zone* zone, const AstRawString* name, VariableMode mode);
 
   Variable* Lookup(const AstRawString* name);
   void Remove(Variable* var);
@@ -43,9 +50,24 @@ class VariableMap: public ZoneHashMap {
 // Sloppy block-scoped function declarations to var-bind
 class SloppyBlockFunctionMap : public ZoneHashMap {
  public:
+  class Delegate : public ZoneObject {
+   public:
+    explicit Delegate(Scope* scope,
+                      SloppyBlockFunctionStatement* statement = nullptr)
+        : scope_(scope), statement_(statement), next_(nullptr) {}
+    void set_statement(Statement* statement);
+
+    void set_next(Delegate* next) { next_ = next; }
+    Delegate* next() const { return next_; }
+    Scope* scope() const { return scope_; }
+
+   private:
+    Scope* scope_;
+    SloppyBlockFunctionStatement* statement_;
+    Delegate* next_;
+  };
+
   explicit SloppyBlockFunctionMap(Zone* zone);
-  void Declare(Zone* zone, const AstRawString* name,
-               SloppyBlockFunctionStatement* statement);
+  void Declare(Zone* zone, const AstRawString* name, Delegate* delegate);
 };
 
 enum class AnalyzeMode { kRegular, kDebugger };
@@ -148,7 +170,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   // Declare a local variable in this scope. If the variable has been
   // declared before, the previously declared variable is returned.
   Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
-                         InitializationFlag init_flag, VariableKind kind,
+                         InitializationFlag init_flag = kCreatedInitialized,
+                         VariableKind kind = NORMAL_VARIABLE,
                          MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
 
   Variable* DeclareVariable(Declaration* declaration, VariableMode mode,
@@ -157,6 +180,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
                             bool* sloppy_mode_block_scope_function_redefinition,
                             bool* ok);
 
+  void DeclareVariableName(const AstRawString* name, VariableMode mode);
+
   // Declarations list.
   ThreadedList<Declaration>* declarations() { return &decls_; }
 
@@ -177,7 +202,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   // allocated globally as a "ghost" variable. RemoveUnresolved removes
   // such a variable again if it was added; otherwise this is a no-op.
   bool RemoveUnresolved(VariableProxy* var);
-  bool RemoveUnresolved(const AstRawString* name);
 
   // Creates a new temporary variable in this scope's TemporaryScope. The
   // name is only used for printing and cannot be used to find the variable.
@@ -207,14 +231,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   // Scope-specific info.
 
   // Inform the scope and outer scopes that the corresponding code contains an
-  // eval call. We don't record eval calls from innner scopes in the outer most
-  // script scope, as we only see those when parsing eagerly. If we recorded the
-  // calls then, the outer most script scope would look different depending on
-  // whether we parsed eagerly or not which is undesirable.
+  // eval call.
   void RecordEvalCall() {
     scope_calls_eval_ = true;
     inner_scope_calls_eval_ = true;
-    for (Scope* scope = outer_scope(); scope && !scope->is_script_scope();
+    for (Scope* scope = outer_scope(); scope != nullptr;
          scope = scope->outer_scope()) {
       scope->inner_scope_calls_eval_ = true;
     }
@@ -303,6 +324,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   bool calls_sloppy_eval() const {
     return scope_calls_eval_ && is_sloppy(language_mode());
   }
+  bool inner_scope_calls_eval() const { return inner_scope_calls_eval_; }
   bool IsAsmModule() const;
   bool IsAsmFunction() const;
   // Does this scope have the potential to execute declarations non-linearly?
@@ -423,6 +445,22 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   void set_is_debug_evaluate_scope() { is_debug_evaluate_scope_ = true; }
   bool is_debug_evaluate_scope() const { return is_debug_evaluate_scope_; }
 
+  bool RemoveInnerScope(Scope* inner_scope) {
+    DCHECK_NOT_NULL(inner_scope);
+    if (inner_scope == inner_scope_) {
+      inner_scope_ = inner_scope_->sibling_;
+      return true;
+    }
+    for (Scope* scope = inner_scope_; scope != nullptr;
+         scope = scope->sibling_) {
+      if (scope->sibling_ == inner_scope) {
+        scope->sibling_ = scope->sibling_->sibling_;
+        return true;
+      }
+    }
+    return false;
+  }
+
  protected:
   explicit Scope(Zone* zone);
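The RemoveInnerScope() added above (and deleted from the private section in a later hunk) is ordinary removal from the singly linked sibling_ chain, now reporting whether anything was unlinked. The same operation in a self-contained form, folding the head and interior cases into one loop via the pointer-to-pointer idiom (illustrative Node type, not the V8 class):

#include <cassert>

struct Node {
  Node* sibling = nullptr;
};

// Unlinks target from the chain rooted at *head; returns true on success,
// mirroring the new bool return value.
bool RemoveFromChain(Node** head, Node* target) {
  for (Node** p = head; *p != nullptr; p = &(*p)->sibling) {
    if (*p == target) {
      *p = target->sibling;
      return true;
    }
  }
  return false;
}

int main() {
  Node a, b, c;
  Node* head = &a;
  a.sibling = &b;
  b.sibling = &c;
  assert(RemoveFromChain(&head, &b));   // interior removal
  assert(a.sibling == &c);
  assert(!RemoveFromChain(&head, &b));  // already gone
}

The class version keeps the head case as an explicit early-out; the sketch trades that for a single loop.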
@@ -431,10 +469,11 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   }
 
  private:
-  Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name,
-                    VariableMode mode, VariableKind kind,
-                    InitializationFlag initialization_flag,
-                    MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
+  Variable* Declare(
+      Zone* zone, const AstRawString* name, VariableMode mode,
+      VariableKind kind = NORMAL_VARIABLE,
+      InitializationFlag initialization_flag = kCreatedInitialized,
+      MaybeAssignedFlag maybe_assigned_flag = kNotAssigned);
 
   // This method should only be invoked on scopes created during parsing (i.e.,
   // not deserialized from a context). Also, since NeedsContext() is only
@@ -527,7 +566,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
   // list along the way, so full resolution cannot be done afterwards.
   // If a ParseInfo* is passed, non-free variables will be resolved.
   VariableProxy* FetchFreeVariables(DeclarationScope* max_outer_scope,
-                                    bool try_to_resolve = true,
                                     ParseInfo* info = nullptr,
                                     VariableProxy* stack = nullptr);
 
@@ -556,30 +594,15 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
                                Handle<ScopeInfo> scope_info);
 
   void AddInnerScope(Scope* inner_scope) {
+    DCHECK_EQ(!needs_migration_, inner_scope->zone() == zone());
     inner_scope->sibling_ = inner_scope_;
     inner_scope_ = inner_scope;
     inner_scope->outer_scope_ = this;
   }
 
-  void RemoveInnerScope(Scope* inner_scope) {
-    DCHECK_NOT_NULL(inner_scope);
-    if (inner_scope == inner_scope_) {
-      inner_scope_ = inner_scope_->sibling_;
-      return;
-    }
-    for (Scope* scope = inner_scope_; scope != nullptr;
-         scope = scope->sibling_) {
-      if (scope->sibling_ == inner_scope) {
-        scope->sibling_ = scope->sibling_->sibling_;
-        return;
-      }
-    }
-  }
-
   void SetDefaults();
 
   friend class DeclarationScope;
+  friend class ScopeTestHelper;
 };
 
 class DeclarationScope : public Scope {
@@ -616,7 +639,15 @@ class DeclarationScope : public Scope {
                             IsClassConstructor(function_kind())));
   }
 
-  bool is_lazily_parsed() const { return is_lazily_parsed_; }
+  bool was_lazily_parsed() const { return was_lazily_parsed_; }
+
+#ifdef DEBUG
+  void set_is_being_lazily_parsed(bool is_being_lazily_parsed) {
+    is_being_lazily_parsed_ = is_being_lazily_parsed;
+  }
+  bool is_being_lazily_parsed() const { return is_being_lazily_parsed_; }
+#endif
+
   bool ShouldEagerCompile() const;
   void set_should_eager_compile();
 
@@ -629,7 +660,7 @@ class DeclarationScope : public Scope {
   bool asm_module() const { return asm_module_; }
   void set_asm_module();
   bool asm_function() const { return asm_function_; }
-  void set_asm_function() { asm_module_ = true; }
+  void set_asm_function() { asm_function_ = true; }
 
   void DeclareThis(AstValueFactory* ast_value_factory);
   void DeclareArguments(AstValueFactory* ast_value_factory);
@@ -736,10 +767,9 @@ class DeclarationScope : public Scope {
   // initializers.
   void AddLocal(Variable* var);
 
-  void DeclareSloppyBlockFunction(const AstRawString* name,
-                                  SloppyBlockFunctionStatement* statement) {
-    sloppy_block_function_map_.Declare(zone(), name, statement);
-  }
+  void DeclareSloppyBlockFunction(
+      const AstRawString* name, Scope* scope,
+      SloppyBlockFunctionStatement* statement = nullptr);
 
   // Go through sloppy_block_function_map_ and hoist those (into this scope)
   // which should be hoisted.
@@ -819,7 +849,11 @@ class DeclarationScope : public Scope {
   // This scope uses "super" property ('super.foo').
   bool scope_uses_super_property_ : 1;
   bool should_eager_compile_ : 1;
-  bool is_lazily_parsed_ : 1;
+  // Set to true after we have finished lazy parsing the scope.
+  bool was_lazily_parsed_ : 1;
+#if DEBUG
+  bool is_being_lazily_parsed_ : 1;
+#endif
 
   // Parameter list in source order.
   ZoneList<Variable*> params_;

View File

@@ -6,6 +6,7 @@
 #include "src/ast/scopes.h"
 #include "src/globals.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {

View File

@@ -88,6 +88,7 @@ namespace internal {
     "The function_data field should be a BytecodeArray on interpreter entry") \
   V(kGeneratedCodeIsTooLarge, "Generated code is too large")                   \
   V(kGenerator, "Generator")                                                   \
+  V(kGetIterator, "GetIterator")                                               \
   V(kGlobalFunctionsMustHaveInitialMap,                                        \
     "Global functions must have initial map")                                  \
   V(kGraphBuildingFailed, "Optimized graph construction failed")               \
@@ -125,6 +126,7 @@ namespace internal {
   V(kLookupVariableInCountOperation, "Lookup variable in count operation")     \
   V(kMapBecameDeprecated, "Map became deprecated")                             \
   V(kMapBecameUnstable, "Map became unstable")                                 \
+  V(kMissingBytecodeArray, "Missing bytecode array from function")             \
   V(kNativeFunctionLiteral, "Native function literal")                         \
   V(kNeedSmiLiteral, "Need a Smi literal here")                                \
   V(kNoCasesLeft, "No cases left")                                             \
@@ -138,7 +140,6 @@ namespace internal {
   V(kNotEnoughSpillSlotsForOsr, "Not enough spill slots for OSR")              \
   V(kNotEnoughVirtualRegistersRegalloc,                                        \
     "Not enough virtual registers (regalloc)")                                 \
-  V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array")              \
   V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
   V(kOffsetOutOfRange, "Offset out of range")                                  \
   V(kOperandIsANumber, "Operand is a number")                                  \
@@ -165,7 +166,7 @@ namespace internal {
   V(kObjectNotTagged, "The object is not tagged")                              \
   V(kOptimizationDisabled, "Optimization disabled")                            \
   V(kOptimizationDisabledForTest, "Optimization disabled for test")            \
-  V(kOptimizedTooManyTimes, "Optimized too many times")                        \
+  V(kDeoptimizedTooManyTimes, "Deoptimized too many times")                    \
   V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister,                   \
     "Out of virtual registers while trying to allocate temp register")         \
   V(kParseScopeError, "Parse/scope error")                                     \
@@ -263,7 +264,9 @@ namespace internal {
   V(kWrongArgumentCountForInvokeIntrinsic,                                     \
     "Wrong number of arguments for intrinsic")                                 \
   V(kShouldNotDirectlyEnterOsrFunction,                                        \
-    "Should not directly enter OSR-compiled function")
+    "Should not directly enter OSR-compiled function")                         \
+  V(kUnexpectedReturnFromWasmTrap,                                             \
+    "Should not return after throwing a wasm trap")
 
 #define ERROR_MESSAGES_CONSTANTS(C, T) C,
 enum BailoutReason {
View File
@@ -4,6 +4,9 @@
 {
   'includes': [
     '../third_party/icu/icu.isolate',
+
+    # MSVS runtime libraries.
+    '../gypfiles/win/msvs_dependencies.isolate',
   ],
   'conditions': [
     ['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {

View File

@@ -24,6 +24,9 @@
 #ifndef POWER_8
 #define POWER_8 0x10000
 #endif
+#ifndef POWER_9
+#define POWER_9 0x20000
+#endif
 #endif
 #if V8_OS_POSIX
 #include <unistd.h>  // sysconf()
@@ -670,7 +673,9 @@ CPU::CPU()
   part_ = -1;
   if (auxv_cpu_type) {
-    if (strcmp(auxv_cpu_type, "power8") == 0) {
+    if (strcmp(auxv_cpu_type, "power9") == 0) {
+      part_ = PPC_POWER9;
+    } else if (strcmp(auxv_cpu_type, "power8") == 0) {
       part_ = PPC_POWER8;
     } else if (strcmp(auxv_cpu_type, "power7") == 0) {
       part_ = PPC_POWER7;
@@ -689,6 +694,9 @@ CPU::CPU()
 #elif V8_OS_AIX
   switch (_system_configuration.implementation) {
+    case POWER_9:
+      part_ = PPC_POWER9;
+      break;
     case POWER_8:
       part_ = PPC_POWER8;
       break;

View File

@@ -69,6 +69,7 @@ class V8_BASE_EXPORT CPU final {
     PPC_POWER6,
     PPC_POWER7,
     PPC_POWER8,
+    PPC_POWER9,
     PPC_G4,
     PPC_G5,
     PPC_PA6T

View File

@@ -70,6 +70,14 @@ class TemplateHashMapImpl {
   // Empties the hash map (occupancy() == 0).
   void Clear();
 
+  // Empties the map and makes it unusable for allocation.
+  void Invalidate() {
+    AllocationPolicy::Delete(map_);
+    map_ = nullptr;
+    occupancy_ = 0;
+    capacity_ = 0;
+  }
+
   // The number of (non-empty) entries in the table.
   uint32_t occupancy() const { return occupancy_; }
 
@@ -89,6 +97,14 @@ class TemplateHashMapImpl {
   Entry* Start() const;
   Entry* Next(Entry* entry) const;
 
+  void Reset(AllocationPolicy allocator) {
+    Initialize(capacity_, allocator);
+    occupancy_ = 0;
+  }
+
+ protected:
+  void Initialize(uint32_t capacity, AllocationPolicy allocator);
+
  private:
   Entry* map_;
   uint32_t capacity_;
@@ -102,7 +118,6 @@ class TemplateHashMapImpl {
   Entry* FillEmptyEntry(Entry* entry, const Key& key, const Value& value,
                         uint32_t hash,
                         AllocationPolicy allocator = AllocationPolicy());
-  void Initialize(uint32_t capacity, AllocationPolicy allocator);
   void Resize(AllocationPolicy allocator);
 };
 
 template <typename Key, typename Value, typename MatchFun,
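The Invalidate()/Reset() pair added above is what lets scopes.cc reset an aborted preparse (keep the maps usable, re-homed in a new zone) or seal a finished one (free the backing store so later use fails fast). A standalone sketch of that lifecycle, with std::map standing in for the zone-allocated table (illustrative names, not the V8 API):

#include <cassert>
#include <map>
#include <string>

struct ScopeMap {
  std::map<std::string, int>* map_ = new std::map<std::string, int>();

  // Reset: drop all entries but keep the map allocatable (occupancy() == 0).
  void Reset() { map_->clear(); }

  // Invalidate: free the backing store; any further insertion is a bug.
  void Invalidate() {
    delete map_;
    map_ = nullptr;
  }

  bool usable() const { return map_ != nullptr; }
  size_t occupancy() const { return map_ ? map_->size() : 0; }
};

int main() {
  ScopeMap m;
  (*m.map_)["x"] = 1;
  m.Reset();        // aborted preparsing: scope stays reusable
  assert(m.usable() && m.occupancy() == 0);
  m.Invalidate();   // successful lazy parse: scope becomes a sealed skeleton
  assert(!m.usable());
}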
View File
@@ -7,8 +7,6 @@
 
 #include <iterator>
 
-#include "src/base/macros.h"
-
 namespace v8 {
 namespace base {

View File

@@ -14,9 +14,8 @@ namespace v8 {
 namespace base {
 
 // Explicit instantiations for commonly used comparisons.
 #define DEFINE_MAKE_CHECK_OP_STRING(type) \
-  template std::string* MakeCheckOpString<type, type>( \
-      type const&, type const&, char const*);
+  template std::string* MakeCheckOpString<type, type>(type, type, char const*);
 DEFINE_MAKE_CHECK_OP_STRING(int)
 DEFINE_MAKE_CHECK_OP_STRING(long)       // NOLINT(runtime/int)
 DEFINE_MAKE_CHECK_OP_STRING(long long)  // NOLINT(runtime/int)
@@ -29,11 +28,11 @@ DEFINE_MAKE_CHECK_OP_STRING(void const*)
 
 // Explicit instantiations for floating point checks.
 #define DEFINE_CHECK_OP_IMPL(NAME) \
-  template std::string* Check##NAME##Impl<float, float>( \
-      float const& lhs, float const& rhs, char const* msg); \
+  template std::string* Check##NAME##Impl<float, float>(float lhs, float rhs, \
+                                                        char const* msg);     \
   template std::string* Check##NAME##Impl<double, double>( \
-      double const& lhs, double const& rhs, char const* msg);
+      double lhs, double rhs, char const* msg);
 DEFINE_CHECK_OP_IMPL(EQ)
 DEFINE_CHECK_OP_IMPL(NE)
 DEFINE_CHECK_OP_IMPL(LE)

View File

@@ -55,13 +55,14 @@ namespace base {
 
 // Helper macro for binary operators.
 // Don't use this macro directly in your code, use CHECK_EQ et al below.
-#define CHECK_OP(name, op, lhs, rhs)                                    \
-  do {                                                                  \
-    if (std::string* _msg = ::v8::base::Check##name##Impl(              \
-            (lhs), (rhs), #lhs " " #op " " #rhs)) {                     \
-      V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str()); \
-      delete _msg;                                                      \
-    }                                                                   \
+#define CHECK_OP(name, op, lhs, rhs)                                      \
+  do {                                                                    \
+    if (std::string* _msg =                                               \
+            ::v8::base::Check##name##Impl<decltype(lhs), decltype(rhs)>(  \
+                (lhs), (rhs), #lhs " " #op " " #rhs)) {                   \
+      V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", _msg->c_str());   \
+      delete _msg;                                                        \
+    }                                                                     \
   } while (0)
 
 #else
@@ -73,13 +74,22 @@ namespace base {
 
 #endif
 
+// Helper to determine how to pass values: Pass scalars and arrays by value,
+// others by const reference. std::decay<T> provides the type which should be
+// used to pass T by value, e.g. converts array to pointer and removes const,
+// volatile and reference.
+template <typename T>
+struct PassType : public std::conditional<
+                      std::is_scalar<typename std::decay<T>::type>::value,
+                      typename std::decay<T>::type, T const&> {};
+
 // Build the error message string. This is separate from the "Impl"
 // function template because it is not performance critical and so can
 // be out of line, while the "Impl" code should be inline. Caller
 // takes ownership of the returned string.
 template <typename Lhs, typename Rhs>
-std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs,
+std::string* MakeCheckOpString(typename PassType<Lhs>::type lhs,
+                               typename PassType<Rhs>::type rhs,
                                char const* msg) {
   std::ostringstream ss;
   ss << msg << " (" << lhs << " vs. " << rhs << ")";
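Spelled out, the PassType helper introduced above picks by-value passing for anything that decays to a scalar, and const-reference passing for class types. The static_asserts below make that concrete (PassType copied from the hunk; the includes and example types are mine):

#include <string>
#include <type_traits>

template <typename T>
struct PassType : public std::conditional<
                      std::is_scalar<typename std::decay<T>::type>::value,
                      typename std::decay<T>::type, T const&> {};

// Anything that decays to a scalar goes by value...
static_assert(std::is_same<PassType<int>::type, int>::value, "");
static_assert(std::is_same<PassType<int const&>::type, int>::value, "");
static_assert(std::is_same<PassType<char[4]>::type, char*>::value, "");
// ...while class types keep the by-const-reference convention.
static_assert(
    std::is_same<PassType<std::string>::type, std::string const&>::value, "");

int main() { return 0; }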
@@ -90,7 +100,7 @@ std::string* MakeCheckOpString(Lhs const& lhs, Rhs const& rhs,
 // in logging.cc.
 #define DEFINE_MAKE_CHECK_OP_STRING(type)                                    \
   extern template V8_BASE_EXPORT std::string* MakeCheckOpString<type, type>( \
-      type const&, type const&, char const*);
+      type, type, char const*);
 DEFINE_MAKE_CHECK_OP_STRING(int)
 DEFINE_MAKE_CHECK_OP_STRING(long)       // NOLINT(runtime/int)
 DEFINE_MAKE_CHECK_OP_STRING(long long)  // NOLINT(runtime/int)
@@ -101,27 +111,77 @@ DEFINE_MAKE_CHECK_OP_STRING(char const*)
 DEFINE_MAKE_CHECK_OP_STRING(void const*)
 #undef DEFINE_MAKE_CHECK_OP_STRING
 
+// is_signed_vs_unsigned::value is true if both types are integral, Lhs is
+// signed, and Rhs is unsigned. False in all other cases.
+template <typename Lhs, typename Rhs>
+struct is_signed_vs_unsigned {
+  enum : bool {
+    value = std::is_integral<Lhs>::value && std::is_integral<Rhs>::value &&
+            std::is_signed<Lhs>::value && std::is_unsigned<Rhs>::value
+  };
+};
+// Same thing, other way around: Lhs is unsigned, Rhs signed.
+template <typename Lhs, typename Rhs>
+struct is_unsigned_vs_signed : public is_signed_vs_unsigned<Rhs, Lhs> {};
+
+// Specialize the compare functions for signed vs. unsigned comparisons.
+// std::enable_if ensures that this template is only instantiable if both Lhs
+// and Rhs are integral types, and their signedness does not match.
+#define MAKE_UNSIGNED(Type, value) \
+  static_cast<typename std::make_unsigned<Type>::type>(value)
+#define DEFINE_SIGNED_MISMATCH_COMP(CHECK, NAME, IMPL)                  \
+  template <typename Lhs, typename Rhs>                                 \
+  V8_INLINE typename std::enable_if<CHECK<Lhs, Rhs>::value, bool>::type \
+  Cmp##NAME##Impl(Lhs const& lhs, Rhs const& rhs) {                     \
+    return IMPL;                                                        \
+  }
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, EQ,
+                            lhs >= 0 && MAKE_UNSIGNED(Lhs, lhs) == rhs)
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LT,
+                            lhs < 0 || MAKE_UNSIGNED(Lhs, lhs) < rhs)
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, LE,
+                            lhs <= 0 || MAKE_UNSIGNED(Lhs, lhs) <= rhs)
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, NE, !CmpEQImpl(lhs, rhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GT, !CmpLEImpl(lhs, rhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_signed_vs_unsigned, GE, !CmpLTImpl(lhs, rhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, EQ, CmpEQImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, NE, CmpNEImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, LT, CmpGTImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, LE, CmpGEImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, GT, CmpLTImpl(rhs, lhs))
+DEFINE_SIGNED_MISMATCH_COMP(is_unsigned_vs_signed, GE, CmpLEImpl(rhs, lhs))
+#undef MAKE_UNSIGNED
+#undef DEFINE_SIGNED_MISMATCH_COMP
+
 // Helper functions for CHECK_OP macro.
-// The (int, int) specialization works around the issue that the compiler
-// will not instantiate the template version of the function on values of
-// unnamed enum type - see comment below.
 // The (float, float) and (double, double) instantiations are explicitly
-// externialized to ensure proper 32/64-bit comparisons on x86.
+// externalized to ensure proper 32/64-bit comparisons on x86.
+// The Cmp##NAME##Impl function is only instantiable if one of the two types is
+// not integral or their signedness matches (i.e. whenever no specialization is
+// required, see above). Otherwise it is disabled by the enable_if construct,
+// and the compiler will pick a specialization from above.
 #define DEFINE_CHECK_OP_IMPL(NAME, op)                                         \
   template <typename Lhs, typename Rhs>                                        \
-  V8_INLINE std::string* Check##NAME##Impl(Lhs const& lhs, Rhs const& rhs,     \
-                                           char const* msg) {                  \
-    return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
-  }                                                                            \
-  V8_INLINE std::string* Check##NAME##Impl(int lhs, int rhs,                   \
+  V8_INLINE                                                                    \
+      typename std::enable_if<!is_signed_vs_unsigned<Lhs, Rhs>::value &&       \
+                                  !is_unsigned_vs_signed<Lhs, Rhs>::value,     \
+                              bool>::type                                      \
+      Cmp##NAME##Impl(typename PassType<Lhs>::type lhs,                        \
+                      typename PassType<Rhs>::type rhs) {                      \
+    return lhs op rhs;                                                         \
+  }                                                                            \
+  template <typename Lhs, typename Rhs>                                        \
+  V8_INLINE std::string* Check##NAME##Impl(typename PassType<Lhs>::type lhs,   \
+                                           typename PassType<Rhs>::type rhs,   \
                                            char const* msg) {                  \
-    return V8_LIKELY(lhs op rhs) ? nullptr : MakeCheckOpString(lhs, rhs, msg); \
+    bool cmp = Cmp##NAME##Impl<Lhs, Rhs>(lhs, rhs);                            \
+    return V8_LIKELY(cmp) ? nullptr                                            \
+                          : MakeCheckOpString<Lhs, Rhs>(lhs, rhs, msg);        \
   }                                                                            \
   extern template V8_BASE_EXPORT std::string* Check##NAME##Impl<float, float>( \
-      float const& lhs, float const& rhs, char const* msg);                    \
+      float lhs, float rhs, char const* msg);                                  \
   extern template V8_BASE_EXPORT std::string*                                  \
-  Check##NAME##Impl<double, double>(double const& lhs, double const& rhs,      \
-                                    char const* msg);
+  Check##NAME##Impl<double, double>(double lhs, double rhs,                    \
+                                    char const* msg);
 
 DEFINE_CHECK_OP_IMPL(EQ, ==)
 DEFINE_CHECK_OP_IMPL(NE, !=)
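The motivation for the Cmp##NAME##Impl specializations above: under the usual arithmetic conversions a plain lhs == rhs silently converts -1 to a huge unsigned value, so a naive CHECK_NE(-1, 0xFFFFFFFFu) would wrongly fail. A reduced, self-contained model of the fix, covering just the signed-vs-unsigned EQ case (C++11, not the actual V8 macros):

#include <cassert>
#include <type_traits>

// Mismatched signedness: compare values, not converted bit patterns.
template <typename Lhs, typename Rhs>
typename std::enable_if<std::is_signed<Lhs>::value &&
                            std::is_unsigned<Rhs>::value,
                        bool>::type
CmpEQ(Lhs lhs, Rhs rhs) {
  // A negative lhs can never equal an unsigned rhs; otherwise the
  // value-preserving unsigned cast is safe.
  return lhs >= 0 &&
         static_cast<typename std::make_unsigned<Lhs>::type>(lhs) == rhs;
}

// Matching signedness (or non-integral types): the plain comparison is fine.
template <typename Lhs, typename Rhs>
typename std::enable_if<!(std::is_signed<Lhs>::value &&
                          std::is_unsigned<Rhs>::value),
                        bool>::type
CmpEQ(Lhs lhs, Rhs rhs) {
  return lhs == rhs;
}

int main() {
  assert(-1 == 0xFFFFFFFFu);        // the pitfall: true after conversion
  assert(!CmpEQ(-1, 0xFFFFFFFFu));  // value comparison: -1 != 4294967295
}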
@@ -141,11 +201,6 @@ DEFINE_CHECK_OP_IMPL(GT, > )
 #define CHECK_NOT_NULL(val) CHECK((val) != nullptr)
 #define CHECK_IMPLIES(lhs, rhs) CHECK(!(lhs) || (rhs))
 
-// Exposed for making debugging easier (to see where your function is being
-// called, just add a call to DumpBacktrace).
-void DumpBacktrace();
-
 }  // namespace base
 }  // namespace v8

View File

@@ -282,23 +282,4 @@ inline T RoundUp(T x, intptr_t m) {
   return RoundDown<T>(static_cast<T>(x + m - 1), m);
 }
 
-namespace v8 {
-namespace base {
-
-// TODO(yangguo): This is a poor man's replacement for std::is_fundamental,
-// which requires C++11. Switch to std::is_fundamental once possible.
-template <typename T>
-inline bool is_fundamental() {
-  return false;
-}
-
-template <>
-inline bool is_fundamental<uint8_t>() {
-  return true;
-}
-
-}  // namespace base
-}  // namespace v8
-
 #endif  // V8_BASE_MACROS_H_
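For reference, the rounding helper whose tail appears in the context above works by rounding x + m - 1 down to a multiple of m; RoundDown is assumed here to mask off the low bits, as in the surrounding header (m must be a power of two). A worked standalone copy:

#include <cassert>
#include <cstdint>

template <typename T>
inline T RoundDown(T x, intptr_t m) {
  return x & static_cast<T>(-m);  // -m is all-ones above the log2(m) low bits
}

template <typename T>
inline T RoundUp(T x, intptr_t m) {
  return RoundDown<T>(static_cast<T>(x + m - 1), m);
}

int main() {
  assert(RoundDown(13, 8) == 8);
  assert(RoundUp(13, 8) == 16);
  assert(RoundUp(16, 8) == 16);  // already aligned: unchanged
}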
Some files were not shown because too many files have changed in this diff.