deps: update v8 to 3.29.93.1

This commit is contained in:
Fedor Indutny 2014-10-10 14:49:02 +04:00
parent 4fae2356d1
commit 6bcea4ff93
1288 changed files with 83232 additions and 65409 deletions

5
deps/v8/.DEPS.git vendored
View File

@ -24,6 +24,11 @@ deps = {
} }
deps_os = { deps_os = {
'android':
{
'v8/third_party/android_tools':
Var('git_url') + '/android_tools.git@31869996507de16812bb53a3d0aaa15cd6194c16',
},
'win': 'win':
{ {
'v8/third_party/cygwin': 'v8/third_party/cygwin':

2
deps/v8/.gitignore vendored
View File

@ -80,3 +80,5 @@ GRTAGS
GSYMS GSYMS
GPATH GPATH
gtags.files gtags.files
turbo*.dot
turbo*.json

1
deps/v8/AUTHORS vendored
View File

@ -56,6 +56,7 @@ Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu> Peter Varga <pvarga@inf.u-szeged.hu>
Rafal Krypa <rafal@krypa.net> Rafal Krypa <rafal@krypa.net>
Rajeev R Krithivasan <rkrithiv@codeaurora.org> Rajeev R Krithivasan <rkrithiv@codeaurora.org>
Refael Ackermann <refack@gmail.com>
Rene Rebe <rene@exactcode.de> Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org> Robert Mustacchi <rm@fingolfin.org>
Rodolph Perfetta <rodolph.perfetta@arm.com> Rodolph Perfetta <rodolph.perfetta@arm.com>

235
deps/v8/BUILD.gn vendored
View File

@ -25,7 +25,7 @@ v8_random_seed = "314159265"
# Configurations # Configurations
# #
config("internal_config") { config("internal_config") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
include_dirs = [ "." ] include_dirs = [ "." ]
@ -38,7 +38,7 @@ config("internal_config") {
} }
config("internal_config_base") { config("internal_config_base") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
include_dirs = [ "." ] include_dirs = [ "." ]
} }
@ -56,7 +56,7 @@ config("external_config") {
} }
config("features") { config("features") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
defines = [] defines = []
@ -118,7 +118,7 @@ config("features") {
} }
config("toolchain") { config("toolchain") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
defines = [] defines = []
cflags = [] cflags = []
@ -167,7 +167,7 @@ config("toolchain") {
# #
action("js2c") { action("js2c") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "tools/js2c.py" script = "tools/js2c.py"
@ -184,24 +184,25 @@ action("js2c") {
"src/uri.js", "src/uri.js",
"third_party/fdlibm/fdlibm.js", "third_party/fdlibm/fdlibm.js",
"src/math.js", "src/math.js",
"src/messages.js",
"src/apinatives.js", "src/apinatives.js",
"src/debug-debugger.js",
"src/mirror-debugger.js",
"src/liveedit-debugger.js",
"src/date.js", "src/date.js",
"src/json.js",
"src/regexp.js", "src/regexp.js",
"src/arraybuffer.js", "src/arraybuffer.js",
"src/typedarray.js", "src/typedarray.js",
"src/collection.js", "src/generator.js",
"src/collection-iterator.js",
"src/weak_collection.js",
"src/promise.js",
"src/object-observe.js", "src/object-observe.js",
"src/macros.py", "src/collection.js",
"src/weak-collection.js",
"src/collection-iterator.js",
"src/promise.js",
"src/messages.js",
"src/json.js",
"src/array-iterator.js", "src/array-iterator.js",
"src/string-iterator.js", "src/string-iterator.js",
"src/debug-debugger.js",
"src/mirror-debugger.js",
"src/liveedit-debugger.js",
"src/macros.py",
] ]
outputs = [ outputs = [
@ -228,7 +229,7 @@ action("js2c") {
} }
action("js2c_experimental") { action("js2c_experimental") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "tools/js2c.py" script = "tools/js2c.py"
@ -242,6 +243,7 @@ action("js2c_experimental") {
"src/generator.js", "src/generator.js",
"src/harmony-string.js", "src/harmony-string.js",
"src/harmony-array.js", "src/harmony-array.js",
"src/harmony-classes.js",
] ]
outputs = [ outputs = [
@ -265,7 +267,7 @@ action("js2c_experimental") {
if (v8_use_external_startup_data) { if (v8_use_external_startup_data) {
action("natives_blob") { action("natives_blob") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [ deps = [
":js2c", ":js2c",
@ -288,7 +290,7 @@ if (v8_use_external_startup_data) {
} }
action("postmortem-metadata") { action("postmortem-metadata") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
script = "tools/gen-postmortem-metadata.py" script = "tools/gen-postmortem-metadata.py"
@ -307,7 +309,7 @@ action("postmortem-metadata") {
} }
action("run_mksnapshot") { action("run_mksnapshot") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [ ":mksnapshot($host_toolchain)" ] deps = [ ":mksnapshot($host_toolchain)" ]
@ -345,7 +347,7 @@ action("run_mksnapshot") {
# #
source_set("v8_nosnapshot") { source_set("v8_nosnapshot") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [ deps = [
":js2c", ":js2c",
@ -366,7 +368,7 @@ source_set("v8_nosnapshot") {
} }
source_set("v8_snapshot") { source_set("v8_snapshot") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [ deps = [
":js2c", ":js2c",
@ -389,7 +391,7 @@ source_set("v8_snapshot") {
if (v8_use_external_startup_data) { if (v8_use_external_startup_data) {
source_set("v8_external_snapshot") { source_set("v8_external_snapshot") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
deps = [ deps = [
":js2c", ":js2c",
@ -411,7 +413,7 @@ if (v8_use_external_startup_data) {
} }
source_set("v8_base") { source_set("v8_base") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [ sources = [
"src/accessors.cc", "src/accessors.cc",
@ -434,6 +436,12 @@ source_set("v8_base") {
"src/ast-value-factory.h", "src/ast-value-factory.h",
"src/ast.cc", "src/ast.cc",
"src/ast.h", "src/ast.h",
"src/background-parsing-task.cc",
"src/background-parsing-task.h",
"src/bailout-reason.cc",
"src/bailout-reason.h",
"src/basic-block-profiler.cc",
"src/basic-block-profiler.h",
"src/bignum-dtoa.cc", "src/bignum-dtoa.cc",
"src/bignum-dtoa.h", "src/bignum-dtoa.h",
"src/bignum.cc", "src/bignum.cc",
@ -451,6 +459,8 @@ source_set("v8_base") {
"src/checks.h", "src/checks.h",
"src/circular-queue-inl.h", "src/circular-queue-inl.h",
"src/circular-queue.h", "src/circular-queue.h",
"src/code-factory.cc",
"src/code-factory.h",
"src/code-stubs.cc", "src/code-stubs.cc",
"src/code-stubs.h", "src/code-stubs.h",
"src/code-stubs-hydrogen.cc", "src/code-stubs-hydrogen.cc",
@ -459,12 +469,19 @@ source_set("v8_base") {
"src/codegen.h", "src/codegen.h",
"src/compilation-cache.cc", "src/compilation-cache.cc",
"src/compilation-cache.h", "src/compilation-cache.h",
"src/compiler/access-builder.cc",
"src/compiler/access-builder.h",
"src/compiler/ast-graph-builder.cc", "src/compiler/ast-graph-builder.cc",
"src/compiler/ast-graph-builder.h", "src/compiler/ast-graph-builder.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",
"src/compiler/change-lowering.cc",
"src/compiler/change-lowering.h",
"src/compiler/code-generator-impl.h", "src/compiler/code-generator-impl.h",
"src/compiler/code-generator.cc", "src/compiler/code-generator.cc",
"src/compiler/code-generator.h", "src/compiler/code-generator.h",
"src/compiler/common-node-cache.h", "src/compiler/common-node-cache.h",
"src/compiler/common-operator.cc",
"src/compiler/common-operator.h", "src/compiler/common-operator.h",
"src/compiler/control-builders.cc", "src/compiler/control-builders.cc",
"src/compiler/control-builders.h", "src/compiler/control-builders.h",
@ -493,24 +510,28 @@ source_set("v8_base") {
"src/compiler/instruction-selector.h", "src/compiler/instruction-selector.h",
"src/compiler/instruction.cc", "src/compiler/instruction.cc",
"src/compiler/instruction.h", "src/compiler/instruction.h",
"src/compiler/js-builtin-reducer.cc",
"src/compiler/js-builtin-reducer.h",
"src/compiler/js-context-specialization.cc", "src/compiler/js-context-specialization.cc",
"src/compiler/js-context-specialization.h", "src/compiler/js-context-specialization.h",
"src/compiler/js-generic-lowering.cc", "src/compiler/js-generic-lowering.cc",
"src/compiler/js-generic-lowering.h", "src/compiler/js-generic-lowering.h",
"src/compiler/js-graph.cc", "src/compiler/js-graph.cc",
"src/compiler/js-graph.h", "src/compiler/js-graph.h",
"src/compiler/js-inlining.cc",
"src/compiler/js-inlining.h",
"src/compiler/js-operator.h", "src/compiler/js-operator.h",
"src/compiler/js-typed-lowering.cc", "src/compiler/js-typed-lowering.cc",
"src/compiler/js-typed-lowering.h", "src/compiler/js-typed-lowering.h",
"src/compiler/linkage-impl.h", "src/compiler/linkage-impl.h",
"src/compiler/linkage.cc", "src/compiler/linkage.cc",
"src/compiler/linkage.h", "src/compiler/linkage.h",
"src/compiler/lowering-builder.cc",
"src/compiler/lowering-builder.h",
"src/compiler/machine-node-factory.h",
"src/compiler/machine-operator-reducer.cc", "src/compiler/machine-operator-reducer.cc",
"src/compiler/machine-operator-reducer.h", "src/compiler/machine-operator-reducer.h",
"src/compiler/machine-operator.cc",
"src/compiler/machine-operator.h", "src/compiler/machine-operator.h",
"src/compiler/machine-type.cc",
"src/compiler/machine-type.h",
"src/compiler/node-aux-data-inl.h", "src/compiler/node-aux-data-inl.h",
"src/compiler/node-aux-data.h", "src/compiler/node-aux-data.h",
"src/compiler/node-cache.cc", "src/compiler/node-cache.cc",
@ -523,6 +544,7 @@ source_set("v8_base") {
"src/compiler/opcodes.h", "src/compiler/opcodes.h",
"src/compiler/operator-properties-inl.h", "src/compiler/operator-properties-inl.h",
"src/compiler/operator-properties.h", "src/compiler/operator-properties.h",
"src/compiler/operator.cc",
"src/compiler/operator.h", "src/compiler/operator.h",
"src/compiler/phi-reducer.h", "src/compiler/phi-reducer.h",
"src/compiler/pipeline.cc", "src/compiler/pipeline.cc",
@ -538,14 +560,16 @@ source_set("v8_base") {
"src/compiler/scheduler.h", "src/compiler/scheduler.h",
"src/compiler/simplified-lowering.cc", "src/compiler/simplified-lowering.cc",
"src/compiler/simplified-lowering.h", "src/compiler/simplified-lowering.h",
"src/compiler/simplified-node-factory.h", "src/compiler/simplified-operator-reducer.cc",
"src/compiler/simplified-operator-reducer.h",
"src/compiler/simplified-operator.cc",
"src/compiler/simplified-operator.h", "src/compiler/simplified-operator.h",
"src/compiler/source-position.cc", "src/compiler/source-position.cc",
"src/compiler/source-position.h", "src/compiler/source-position.h",
"src/compiler/structured-machine-assembler.cc",
"src/compiler/structured-machine-assembler.h",
"src/compiler/typer.cc", "src/compiler/typer.cc",
"src/compiler/typer.h", "src/compiler/typer.h",
"src/compiler/value-numbering-reducer.cc",
"src/compiler/value-numbering-reducer.h",
"src/compiler/verifier.cc", "src/compiler/verifier.cc",
"src/compiler/verifier.h", "src/compiler/verifier.h",
"src/compiler.cc", "src/compiler.cc",
@ -601,7 +625,6 @@ source_set("v8_base") {
"src/fast-dtoa.cc", "src/fast-dtoa.cc",
"src/fast-dtoa.h", "src/fast-dtoa.h",
"src/feedback-slots.h", "src/feedback-slots.h",
"src/field-index.cc",
"src/field-index.h", "src/field-index.h",
"src/field-index-inl.h", "src/field-index-inl.h",
"src/fixed-dtoa.cc", "src/fixed-dtoa.cc",
@ -630,6 +653,8 @@ source_set("v8_base") {
"src/heap-snapshot-generator-inl.h", "src/heap-snapshot-generator-inl.h",
"src/heap-snapshot-generator.cc", "src/heap-snapshot-generator.cc",
"src/heap-snapshot-generator.h", "src/heap-snapshot-generator.h",
"src/heap/gc-idle-time-handler.cc",
"src/heap/gc-idle-time-handler.h",
"src/heap/gc-tracer.cc", "src/heap/gc-tracer.cc",
"src/heap/gc-tracer.h", "src/heap/gc-tracer.h",
"src/heap/heap-inl.h", "src/heap/heap-inl.h",
@ -707,11 +732,25 @@ source_set("v8_base") {
"src/i18n.h", "src/i18n.h",
"src/icu_util.cc", "src/icu_util.cc",
"src/icu_util.h", "src/icu_util.h",
"src/ic-inl.h", "src/ic/access-compiler.cc",
"src/ic.cc", "src/ic/access-compiler.h",
"src/ic.h", "src/ic/call-optimization.cc",
"src/ic/call-optimization.h",
"src/ic/handler-compiler.cc",
"src/ic/handler-compiler.h",
"src/ic/ic-inl.h",
"src/ic/ic-state.cc",
"src/ic/ic-state.h",
"src/ic/ic.cc",
"src/ic/ic.h",
"src/ic/ic-compiler.cc",
"src/ic/ic-compiler.h",
"src/ic/stub-cache.cc",
"src/ic/stub-cache.h",
"src/interface.cc", "src/interface.cc",
"src/interface.h", "src/interface.h",
"src/interface-descriptors.cc",
"src/interface-descriptors.h",
"src/interpreter-irregexp.cc", "src/interpreter-irregexp.cc",
"src/interpreter-irregexp.h", "src/interpreter-irregexp.h",
"src/isolate.cc", "src/isolate.cc",
@ -785,8 +824,21 @@ source_set("v8_base") {
"src/rewriter.h", "src/rewriter.h",
"src/runtime-profiler.cc", "src/runtime-profiler.cc",
"src/runtime-profiler.h", "src/runtime-profiler.h",
"src/runtime.cc", "src/runtime/runtime-collections.cc",
"src/runtime.h", "src/runtime/runtime-compiler.cc",
"src/runtime/runtime-i18n.cc",
"src/runtime/runtime-json.cc",
"src/runtime/runtime-maths.cc",
"src/runtime/runtime-numbers.cc",
"src/runtime/runtime-regexp.cc",
"src/runtime/runtime-strings.cc",
"src/runtime/runtime-test.cc",
"src/runtime/runtime-typedarray.cc",
"src/runtime/runtime-uri.cc",
"src/runtime/runtime-utils.h",
"src/runtime/runtime.cc",
"src/runtime/runtime.h",
"src/runtime/string-builder.h",
"src/safepoint-table.cc", "src/safepoint-table.cc",
"src/safepoint-table.h", "src/safepoint-table.h",
"src/sampler.cc", "src/sampler.cc",
@ -812,13 +864,14 @@ source_set("v8_base") {
"src/string-stream.h", "src/string-stream.h",
"src/strtod.cc", "src/strtod.cc",
"src/strtod.h", "src/strtod.h",
"src/stub-cache.cc",
"src/stub-cache.h",
"src/token.cc", "src/token.cc",
"src/token.h", "src/token.h",
"src/transitions-inl.h", "src/transitions-inl.h",
"src/transitions.cc", "src/transitions.cc",
"src/transitions.h", "src/transitions.h",
"src/type-feedback-vector-inl.h",
"src/type-feedback-vector.cc",
"src/type-feedback-vector.h",
"src/type-info.cc", "src/type-info.cc",
"src/type-info.h", "src/type-info.h",
"src/types-inl.h", "src/types-inl.h",
@ -871,7 +924,7 @@ source_set("v8_base") {
"src/ia32/frames-ia32.cc", "src/ia32/frames-ia32.cc",
"src/ia32/frames-ia32.h", "src/ia32/frames-ia32.h",
"src/ia32/full-codegen-ia32.cc", "src/ia32/full-codegen-ia32.cc",
"src/ia32/ic-ia32.cc", "src/ia32/interface-descriptors-ia32.cc",
"src/ia32/lithium-codegen-ia32.cc", "src/ia32/lithium-codegen-ia32.cc",
"src/ia32/lithium-codegen-ia32.h", "src/ia32/lithium-codegen-ia32.h",
"src/ia32/lithium-gap-resolver-ia32.cc", "src/ia32/lithium-gap-resolver-ia32.cc",
@ -882,11 +935,13 @@ source_set("v8_base") {
"src/ia32/macro-assembler-ia32.h", "src/ia32/macro-assembler-ia32.h",
"src/ia32/regexp-macro-assembler-ia32.cc", "src/ia32/regexp-macro-assembler-ia32.cc",
"src/ia32/regexp-macro-assembler-ia32.h", "src/ia32/regexp-macro-assembler-ia32.h",
"src/ia32/stub-cache-ia32.cc",
"src/compiler/ia32/code-generator-ia32.cc", "src/compiler/ia32/code-generator-ia32.cc",
"src/compiler/ia32/instruction-codes-ia32.h", "src/compiler/ia32/instruction-codes-ia32.h",
"src/compiler/ia32/instruction-selector-ia32.cc", "src/compiler/ia32/instruction-selector-ia32.cc",
"src/compiler/ia32/linkage-ia32.cc", "src/compiler/ia32/linkage-ia32.cc",
"src/ic/ia32/ic-ia32.cc",
"src/ic/ia32/ic-compiler-ia32.cc",
"src/ic/ia32/stub-cache-ia32.cc",
] ]
} else if (v8_target_arch == "x64") { } else if (v8_target_arch == "x64") {
sources += [ sources += [
@ -905,7 +960,7 @@ source_set("v8_base") {
"src/x64/frames-x64.cc", "src/x64/frames-x64.cc",
"src/x64/frames-x64.h", "src/x64/frames-x64.h",
"src/x64/full-codegen-x64.cc", "src/x64/full-codegen-x64.cc",
"src/x64/ic-x64.cc", "src/x64/interface-descriptors-x64.cc",
"src/x64/lithium-codegen-x64.cc", "src/x64/lithium-codegen-x64.cc",
"src/x64/lithium-codegen-x64.h", "src/x64/lithium-codegen-x64.h",
"src/x64/lithium-gap-resolver-x64.cc", "src/x64/lithium-gap-resolver-x64.cc",
@ -916,11 +971,15 @@ source_set("v8_base") {
"src/x64/macro-assembler-x64.h", "src/x64/macro-assembler-x64.h",
"src/x64/regexp-macro-assembler-x64.cc", "src/x64/regexp-macro-assembler-x64.cc",
"src/x64/regexp-macro-assembler-x64.h", "src/x64/regexp-macro-assembler-x64.h",
"src/x64/stub-cache-x64.cc",
"src/compiler/x64/code-generator-x64.cc", "src/compiler/x64/code-generator-x64.cc",
"src/compiler/x64/instruction-codes-x64.h", "src/compiler/x64/instruction-codes-x64.h",
"src/compiler/x64/instruction-selector-x64.cc", "src/compiler/x64/instruction-selector-x64.cc",
"src/compiler/x64/linkage-x64.cc", "src/compiler/x64/linkage-x64.cc",
"src/ic/x64/access-compiler-x64.cc",
"src/ic/x64/handler-compiler-x64.cc",
"src/ic/x64/ic-x64.cc",
"src/ic/x64/ic-compiler-x64.cc",
"src/ic/x64/stub-cache-x64.cc",
] ]
} else if (v8_target_arch == "arm") { } else if (v8_target_arch == "arm") {
sources += [ sources += [
@ -941,7 +1000,8 @@ source_set("v8_base") {
"src/arm/frames-arm.cc", "src/arm/frames-arm.cc",
"src/arm/frames-arm.h", "src/arm/frames-arm.h",
"src/arm/full-codegen-arm.cc", "src/arm/full-codegen-arm.cc",
"src/arm/ic-arm.cc", "src/arm/interface-descriptors-arm.cc",
"src/arm/interface-descriptors-arm.h",
"src/arm/lithium-arm.cc", "src/arm/lithium-arm.cc",
"src/arm/lithium-arm.h", "src/arm/lithium-arm.h",
"src/arm/lithium-codegen-arm.cc", "src/arm/lithium-codegen-arm.cc",
@ -953,11 +1013,15 @@ source_set("v8_base") {
"src/arm/regexp-macro-assembler-arm.cc", "src/arm/regexp-macro-assembler-arm.cc",
"src/arm/regexp-macro-assembler-arm.h", "src/arm/regexp-macro-assembler-arm.h",
"src/arm/simulator-arm.cc", "src/arm/simulator-arm.cc",
"src/arm/stub-cache-arm.cc",
"src/compiler/arm/code-generator-arm.cc", "src/compiler/arm/code-generator-arm.cc",
"src/compiler/arm/instruction-codes-arm.h", "src/compiler/arm/instruction-codes-arm.h",
"src/compiler/arm/instruction-selector-arm.cc", "src/compiler/arm/instruction-selector-arm.cc",
"src/compiler/arm/linkage-arm.cc", "src/compiler/arm/linkage-arm.cc",
"src/ic/arm/access-compiler-arm.cc",
"src/ic/arm/handler-compiler-arm.cc",
"src/ic/arm/ic-arm.cc",
"src/ic/arm/ic-compiler-arm.cc",
"src/ic/arm/stub-cache-arm.cc",
] ]
} else if (v8_target_arch == "arm64") { } else if (v8_target_arch == "arm64") {
sources += [ sources += [
@ -981,11 +1045,12 @@ source_set("v8_base") {
"src/arm64/frames-arm64.cc", "src/arm64/frames-arm64.cc",
"src/arm64/frames-arm64.h", "src/arm64/frames-arm64.h",
"src/arm64/full-codegen-arm64.cc", "src/arm64/full-codegen-arm64.cc",
"src/arm64/ic-arm64.cc",
"src/arm64/instructions-arm64.cc", "src/arm64/instructions-arm64.cc",
"src/arm64/instructions-arm64.h", "src/arm64/instructions-arm64.h",
"src/arm64/instrument-arm64.cc", "src/arm64/instrument-arm64.cc",
"src/arm64/instrument-arm64.h", "src/arm64/instrument-arm64.h",
"src/arm64/interface-descriptors-arm64.cc",
"src/arm64/interface-descriptors-arm64.h",
"src/arm64/lithium-arm64.cc", "src/arm64/lithium-arm64.cc",
"src/arm64/lithium-arm64.h", "src/arm64/lithium-arm64.h",
"src/arm64/lithium-codegen-arm64.cc", "src/arm64/lithium-codegen-arm64.cc",
@ -999,13 +1064,17 @@ source_set("v8_base") {
"src/arm64/regexp-macro-assembler-arm64.h", "src/arm64/regexp-macro-assembler-arm64.h",
"src/arm64/simulator-arm64.cc", "src/arm64/simulator-arm64.cc",
"src/arm64/simulator-arm64.h", "src/arm64/simulator-arm64.h",
"src/arm64/stub-cache-arm64.cc",
"src/arm64/utils-arm64.cc", "src/arm64/utils-arm64.cc",
"src/arm64/utils-arm64.h", "src/arm64/utils-arm64.h",
"src/compiler/arm64/code-generator-arm64.cc", "src/compiler/arm64/code-generator-arm64.cc",
"src/compiler/arm64/instruction-codes-arm64.h", "src/compiler/arm64/instruction-codes-arm64.h",
"src/compiler/arm64/instruction-selector-arm64.cc", "src/compiler/arm64/instruction-selector-arm64.cc",
"src/compiler/arm64/linkage-arm64.cc", "src/compiler/arm64/linkage-arm64.cc",
"src/ic/arm64/access-compiler-arm64.cc",
"src/ic/arm64/handler-compiler-arm64.cc",
"src/ic/arm64/ic-arm64.cc",
"src/ic/arm64/ic-compiler-arm64.cc",
"src/ic/arm64/stub-cache-arm64.cc",
] ]
} else if (v8_target_arch == "mipsel") { } else if (v8_target_arch == "mipsel") {
sources += [ sources += [
@ -1026,7 +1095,7 @@ source_set("v8_base") {
"src/mips/frames-mips.cc", "src/mips/frames-mips.cc",
"src/mips/frames-mips.h", "src/mips/frames-mips.h",
"src/mips/full-codegen-mips.cc", "src/mips/full-codegen-mips.cc",
"src/mips/ic-mips.cc", "src/mips/interface-descriptors-mips.cc",
"src/mips/lithium-codegen-mips.cc", "src/mips/lithium-codegen-mips.cc",
"src/mips/lithium-codegen-mips.h", "src/mips/lithium-codegen-mips.h",
"src/mips/lithium-gap-resolver-mips.cc", "src/mips/lithium-gap-resolver-mips.cc",
@ -1038,7 +1107,48 @@ source_set("v8_base") {
"src/mips/regexp-macro-assembler-mips.cc", "src/mips/regexp-macro-assembler-mips.cc",
"src/mips/regexp-macro-assembler-mips.h", "src/mips/regexp-macro-assembler-mips.h",
"src/mips/simulator-mips.cc", "src/mips/simulator-mips.cc",
"src/mips/stub-cache-mips.cc", "src/ic/mips/access-compiler-mips.cc",
"src/ic/mips/handler-compiler-mips.cc",
"src/ic/mips/ic-mips.cc",
"src/ic/mips/ic-compiler-mips.cc",
"src/ic/mips/stub-cache-mips.cc",
]
} else if (v8_target_arch == "mips64el") {
sources += [
"src/mips64/assembler-mips64.cc",
"src/mips64/assembler-mips64.h",
"src/mips64/assembler-mips64-inl.h",
"src/mips64/builtins-mips64.cc",
"src/mips64/codegen-mips64.cc",
"src/mips64/codegen-mips64.h",
"src/mips64/code-stubs-mips64.cc",
"src/mips64/code-stubs-mips64.h",
"src/mips64/constants-mips64.cc",
"src/mips64/constants-mips64.h",
"src/mips64/cpu-mips64.cc",
"src/mips64/debug-mips64.cc",
"src/mips64/deoptimizer-mips64.cc",
"src/mips64/disasm-mips64.cc",
"src/mips64/frames-mips64.cc",
"src/mips64/frames-mips64.h",
"src/mips64/full-codegen-mips64.cc",
"src/mips64/interface-descriptors-mips64.cc",
"src/mips64/lithium-codegen-mips64.cc",
"src/mips64/lithium-codegen-mips64.h",
"src/mips64/lithium-gap-resolver-mips64.cc",
"src/mips64/lithium-gap-resolver-mips64.h",
"src/mips64/lithium-mips64.cc",
"src/mips64/lithium-mips64.h",
"src/mips64/macro-assembler-mips64.cc",
"src/mips64/macro-assembler-mips64.h",
"src/mips64/regexp-macro-assembler-mips64.cc",
"src/mips64/regexp-macro-assembler-mips64.h",
"src/mips64/simulator-mips64.cc",
"src/ic/mips64/access-compiler-mips64.cc",
"src/ic/mips64/handler-compiler-mips64.cc",
"src/ic/mips64/ic-mips64.cc",
"src/ic/mips64/ic-compiler-mips64.cc",
"src/ic/mips64/stub-cache-mips64.cc",
] ]
} }
@ -1046,9 +1156,18 @@ source_set("v8_base") {
configs += [ "//build/config/compiler:no_chromium_code" ] configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config", ":features", ":toolchain" ] configs += [ ":internal_config", ":features", ":toolchain" ]
if (!is_debug) {
configs -= [ "//build/config/compiler:optimize" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
defines = [] defines = []
deps = [ ":v8_libbase" ] deps = [ ":v8_libbase" ]
if (is_win) {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
cflags = [ "/wd4267" ]
}
if (is_linux) { if (is_linux) {
if (v8_compress_startup_data == "bz2") { if (v8_compress_startup_data == "bz2") {
libs += [ "bz2" ] libs += [ "bz2" ]
@ -1076,7 +1195,7 @@ source_set("v8_base") {
} }
source_set("v8_libbase") { source_set("v8_libbase") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [ sources = [
"src/base/atomicops.h", "src/base/atomicops.h",
@ -1089,9 +1208,14 @@ source_set("v8_libbase") {
"src/base/atomicops_internals_x86_gcc.cc", "src/base/atomicops_internals_x86_gcc.cc",
"src/base/atomicops_internals_x86_gcc.h", "src/base/atomicops_internals_x86_gcc.h",
"src/base/atomicops_internals_x86_msvc.h", "src/base/atomicops_internals_x86_msvc.h",
"src/base/bits.cc",
"src/base/bits.h",
"src/base/build_config.h", "src/base/build_config.h",
"src/base/cpu.cc", "src/base/cpu.cc",
"src/base/cpu.h", "src/base/cpu.h",
"src/base/division-by-constant.cc",
"src/base/division-by-constant.h",
"src/base/flags.h",
"src/base/lazy-instance.h", "src/base/lazy-instance.h",
"src/base/logging.cc", "src/base/logging.cc",
"src/base/logging.h", "src/base/logging.h",
@ -1112,6 +1236,8 @@ source_set("v8_libbase") {
"src/base/safe_conversions_impl.h", "src/base/safe_conversions_impl.h",
"src/base/safe_math.h", "src/base/safe_math.h",
"src/base/safe_math_impl.h", "src/base/safe_math_impl.h",
"src/base/sys-info.cc",
"src/base/sys-info.h",
"src/base/utils/random-number-generator.cc", "src/base/utils/random-number-generator.cc",
"src/base/utils/random-number-generator.h", "src/base/utils/random-number-generator.h",
] ]
@ -1120,6 +1246,11 @@ source_set("v8_libbase") {
configs += [ "//build/config/compiler:no_chromium_code" ] configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config_base", ":features", ":toolchain" ] configs += [ ":internal_config_base", ":features", ":toolchain" ]
if (!is_debug) {
configs -= [ "//build/config/compiler:optimize" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
defines = [] defines = []
if (is_posix) { if (is_posix) {
@ -1183,6 +1314,11 @@ source_set("v8_libplatform") {
configs += [ "//build/config/compiler:no_chromium_code" ] configs += [ "//build/config/compiler:no_chromium_code" ]
configs += [ ":internal_config_base", ":features", ":toolchain" ] configs += [ ":internal_config_base", ":features", ":toolchain" ]
if (!is_debug) {
configs -= [ "//build/config/compiler:optimize" ]
configs += [ "//build/config/compiler:optimize_max" ]
}
deps = [ deps = [
":v8_libbase", ":v8_libbase",
] ]
@ -1194,7 +1330,7 @@ source_set("v8_libplatform") {
if (current_toolchain == host_toolchain) { if (current_toolchain == host_toolchain) {
executable("mksnapshot") { executable("mksnapshot") {
visibility = ":*" # Only targets in this file can depend on this. visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = [ sources = [
"src/mksnapshot.cc", "src/mksnapshot.cc",
@ -1250,6 +1386,7 @@ component("v8") {
direct_dependent_configs = [ ":external_config" ] direct_dependent_configs = [ ":external_config" ]
libs = []
if (is_android && current_toolchain != host_toolchain) { if (is_android && current_toolchain != host_toolchain) {
libs += [ "log" ] libs += [ "log" ]
} }

256
deps/v8/ChangeLog vendored
View File

@ -1,4 +1,258 @@
2014-08-13: Version 3.28.73 2014-09-30: Version 3.29.93
Add a getter for the address and size of the code range to the pulic API
(issue 3598).
Convert `obj` ToObject in Object.keys() (issue 3587).
Performance and stability improvements on all platforms.
2014-09-29: Version 3.29.92
Performance and stability improvements on all platforms.
2014-09-26: Version 3.29.91
Performance and stability improvements on all platforms.
2014-09-25: Version 3.29.88
Performance and stability improvements on all platforms.
2014-09-24: Version 3.29.87
Preserve message when rethrowing exception (issue 3583).
Fix escaped index JSON parsing (Chromium issue 416449).
Performance and stability improvements on all platforms.
2014-09-23: Version 3.29.84
Performance and stability improvements on all platforms.
2014-09-23: Version 3.29.83
Performance and stability improvements on all platforms.
2014-09-23: Version 3.29.82
Fix escaped index JSON parsing (Chromium issue 416449).
Performance and stability improvements on all platforms.
2014-09-17: Version 3.29.70
Enable ES6 generators (issue 2355).
Fixed int vs. uintptr_t confusion (plus some cleanup on the way) (issue
3556).
Move configuration of ResourceConstraints to Isolate construction.
Performance and stability improvements on all platforms.
2014-09-16: Version 3.29.66
Currently, a new isolate is created in an uninitialized state, and
several API methods will automatically initialize it. During this
uninitialized state, code event handlers and function entry handlers can
be attached to the isolate.
Performance and stability improvements on all platforms.
2014-09-15: Version 3.29.64
ES6: String(symbol) should work like symbol.toString (issue 3554).
Arrow functions: Cleanup handling of the prototype property (issue
2700).
Remove V8_HOST_CAN_READ_UNALIGNED and its uses (Chromium issue 412967).
Fix Smi vs. HeapObject confusion in HConstants (Chromium issue 412215).
Performance and stability improvements on all platforms.
2014-09-12: Version 3.29.59
Do not use wide reads in CopyCharsUnsigned (Chromium issue 412967).
Fix inaccurate type condition in Hydrogen (Chromium issue 412210).
Fix crash in ScriptDebugServer::wrapCallFrames (Chromium issue 411196).
Performance and stability improvements on all platforms.
2014-09-11: Version 3.29.57
ES6: Add support for method shorthand in object literals (issue 3516).
Unbreak FreeBSD build (hopefully) (issue 3548).
Performance and stability improvements on all platforms.
2014-09-09: Version 3.29.53
Performance and stability improvements on all platforms.
2014-09-08: Version 3.29.50
Allocate a new empty number dictionary when resetting elements (Chromium
issue 410332).
Performance and stability improvements on all platforms.
2014-09-05: Version 3.29.43
Enforce correct number comparisons when inlining Array.indexOf (Chromium
issue 407946).
Performance and stability improvements on all platforms.
2014-09-04: Version 3.29.41
Performance and stability improvements on all platforms.
2014-09-03: Version 3.29.40
Use correct receiver for DOM accessors on the prototype chain (issue
3538).
Performance and stability improvements on all platforms.
2014-09-02: Version 3.29.38
Do not clear weak monomorphic IC after context disposal (Chromium issue
404020).
Turn on job-based sweeping (issue 3104).
Performance and stability improvements on all platforms.
2014-09-01: Version 3.29.35
Performance and stability improvements on all platforms.
2014-08-29: Version 3.29.29
Performance and stability improvements on all platforms.
2014-08-28: Version 3.29.27
Performance and stability improvements on all platforms.
2014-08-28: Version 3.29.25
Performance and stability improvements on all platforms.
2014-08-28: Version 3.29.24
Tweaks to generate XP-compatible .exes (Chromium issue 407517).
Performance and stability improvements on all platforms.
2014-08-28: Version 3.29.23
Performance and stability improvements on all platforms.
2014-08-27: Version 3.29.20
Handle empty allocation list in CodeRange properly (issue 3540, Chromium
issue 407566).
Fixed inlining of constant values (issue 3529).
Performance and stability improvements on all platforms.
2014-08-25: Version 3.29.17
Performance and stability improvements on all platforms.
2014-08-24: Version 3.29.16
Fix issue with numeric property names (issue 3507).
Add back the duplicate property checker (issue 3498).
Performance and stability improvements on all platforms.
2014-08-22: Version 3.29.14
Don't inline Array.shift() if receiver map is not extensible (Chromium
issue 405517).
Performance and stability improvements on all platforms.
2014-08-21: Version 3.29.11
Refactor ParseObjectLiteral.
Support symbol-named properties in API (issue 3394).
Suppress test262 test that tests duplicate properties.
ES6: Duplicate properties are no longer an error (issue 3498).
Expose function CheckDebugBreak in the debugger api.
Remove RegExp.$input (issue 3486).
Performance and stability improvements on all platforms.
2014-08-21: Version 3.29.10
ES6: Make sure we do not store -0 as the key in Map/Set (issue 3515).
Remove removed flags from tests.
Expose well-known Symbols to C++ API (Chromium issue 341423).
Implement ES6 Array.of() (issue 3427).
Performance and stability improvements on all platforms.
2014-08-20: Version 3.29.9
Correctly handle holes when concat()ing double arrays (Chromium issue
403409).
[turbofan] Refactor the InstructionSelector tests (issue 3489).
ES6: Make Map/Set constructors support iterable values (issue 3508).
WeakMap/WeakSet: Add test for non object keys (issue 3399).
Performance and stability improvements on all platforms. Performance and stability improvements on all platforms.

7
deps/v8/DEPS vendored
View File

@ -3,6 +3,8 @@
# all paths in here must match this assumption. # all paths in here must match this assumption.
vars = { vars = {
"chromium_git": "https://chromium.googlesource.com",
"chromium_trunk": "https://src.chromium.org/svn/trunk", "chromium_trunk": "https://src.chromium.org/svn/trunk",
"buildtools_revision": "fb782d4369d5ae04f17a2fceef7de5a63e50f07b", "buildtools_revision": "fb782d4369d5ae04f17a2fceef7de5a63e50f07b",
@ -28,6 +30,11 @@ deps = {
} }
deps_os = { deps_os = {
"android": {
"v8/third_party/android_tools":
Var("chromium_git") + "/android_tools.git" + "@" +
"31869996507de16812bb53a3d0aaa15cd6194c16",
},
"win": { "win": {
"v8/third_party/cygwin": "v8/third_party/cygwin":
Var("chromium_trunk") + "/deps/third_party/cygwin@66844", Var("chromium_trunk") + "/deps/third_party/cygwin@66844",

4
deps/v8/Makefile vendored
View File

@ -230,8 +230,8 @@ NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration: # List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \ GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
build/toolchain.gypi samples/samples.gyp src/d8.gyp \ build/toolchain.gypi samples/samples.gyp src/compiler/compiler.gyp \
test/cctest/cctest.gyp tools/gyp/v8.gyp src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended. # If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on) ifeq ($(vtunejit), on)

View File

@ -64,20 +64,20 @@ else
DEFINES += android_target_arch=mips mips_arch_variant=mips32r2 DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
TOOLCHAIN_ARCH = mipsel-linux-android TOOLCHAIN_ARCH = mipsel-linux-android
TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH) TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
TOOLCHAIN_VER = 4.6 TOOLCHAIN_VER = 4.8
else else
ifeq ($(ARCH), android_ia32) ifeq ($(ARCH), android_ia32)
DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14 DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86 TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.6 TOOLCHAIN_VER = 4.8
else else
ifeq ($(ARCH), android_x87) ifeq ($(ARCH), android_x87)
DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14 DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
TOOLCHAIN_ARCH = x86 TOOLCHAIN_ARCH = x86
TOOLCHAIN_PREFIX = i686-linux-android TOOLCHAIN_PREFIX = i686-linux-android
TOOLCHAIN_VER = 4.6 TOOLCHAIN_VER = 4.8
else else
$(error Target architecture "${ARCH}" is not supported) $(error Target architecture "${ARCH}" is not supported)
endif endif

79
deps/v8/PRESUBMIT.py vendored
View File

@ -34,6 +34,32 @@ for more details about the presubmit API built into gcl.
import sys import sys
_EXCLUDED_PATHS = (
r"^test[\\\/].*",
r"^testing[\\\/].*",
r"^third_party[\\\/].*",
r"^tools[\\\/].*",
)
# Regular expression that matches code only used for test binaries
# (best effort).
_TEST_CODE_EXCLUDED_PATHS = (
r'.+-unittest\.cc',
# Has a method VisitForTest().
r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc',
# Test extension.
r'src[\\\/]extensions[\\\/]gc-extension\.cc',
)
_TEST_ONLY_WARNING = (
'You might be calling functions intended only for testing from\n'
'production code. It is OK to ignore this warning if you know what\n'
'you are doing, as the heuristics used to detect the situation are\n'
'not perfect. The commit queue will not block on this warning.')
def _V8PresubmitChecks(input_api, output_api): def _V8PresubmitChecks(input_api, output_api):
"""Runs the V8 presubmit checks.""" """Runs the V8 presubmit checks."""
import sys import sys
@ -41,7 +67,7 @@ def _V8PresubmitChecks(input_api, output_api):
input_api.PresubmitLocalPath(), 'tools')) input_api.PresubmitLocalPath(), 'tools'))
from presubmit import CppLintProcessor from presubmit import CppLintProcessor
from presubmit import SourceProcessor from presubmit import SourceProcessor
from presubmit import CheckGeneratedRuntimeTests from presubmit import CheckRuntimeVsNativesNameClashes
from presubmit import CheckExternalReferenceRegistration from presubmit import CheckExternalReferenceRegistration
results = [] results = []
@ -51,9 +77,9 @@ def _V8PresubmitChecks(input_api, output_api):
results.append(output_api.PresubmitError( results.append(output_api.PresubmitError(
"Copyright header, trailing whitespaces and two empty lines " \ "Copyright header, trailing whitespaces and two empty lines " \
"between declarations check failed")) "between declarations check failed"))
if not CheckGeneratedRuntimeTests(input_api.PresubmitLocalPath()): if not CheckRuntimeVsNativesNameClashes(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError( results.append(output_api.PresubmitError(
"Generated runtime tests check failed")) "Runtime/natives name clash check failed"))
if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()): if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
results.append(output_api.PresubmitError( results.append(output_api.PresubmitError(
"External references registration check failed")) "External references registration check failed"))
@ -113,6 +139,49 @@ def _CheckUnwantedDependencies(input_api, output_api):
return results return results
def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
"""Attempts to prevent use of functions intended only for testing in
non-testing code. For now this is just a best-effort implementation
that ignores header files and may have some false positives. A
better implementation would probably need a proper C++ parser.
"""
# We only scan .cc files, as the declaration of for-testing functions in
# header files are hard to distinguish from calls to such functions without a
# proper C++ parser.
file_inclusion_pattern = r'.+\.cc'
base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
exclusion_pattern = input_api.re.compile(
r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
base_function_pattern, base_function_pattern))
def FilterFile(affected_file):
black_list = (_EXCLUDED_PATHS +
_TEST_CODE_EXCLUDED_PATHS +
input_api.DEFAULT_BLACK_LIST)
return input_api.FilterSourceFile(
affected_file,
white_list=(file_inclusion_pattern, ),
black_list=black_list)
problems = []
for f in input_api.AffectedSourceFiles(FilterFile):
local_path = f.LocalPath()
for line_number, line in f.ChangedContents():
if (inclusion_pattern.search(line) and
not comment_pattern.search(line) and
not exclusion_pattern.search(line)):
problems.append(
'%s:%d\n %s' % (local_path, line_number, line.strip()))
if problems:
return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
else:
return []
def _CommonChecks(input_api, output_api): def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit.""" """Checks common to both upload and commit."""
results = [] results = []
@ -122,6 +191,8 @@ def _CommonChecks(input_api, output_api):
input_api, output_api)) input_api, output_api))
results.extend(_V8PresubmitChecks(input_api, output_api)) results.extend(_V8PresubmitChecks(input_api, output_api))
results.extend(_CheckUnwantedDependencies(input_api, output_api)) results.extend(_CheckUnwantedDependencies(input_api, output_api))
results.extend(
_CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
return results return results
@ -180,6 +251,6 @@ def GetPreferredTryMasters(project, change):
'v8_linux_layout_dbg': set(['defaulttests']), 'v8_linux_layout_dbg': set(['defaulttests']),
'v8_mac_rel': set(['defaulttests']), 'v8_mac_rel': set(['defaulttests']),
'v8_win_rel': set(['defaulttests']), 'v8_win_rel': set(['defaulttests']),
'v8_win64_rel': set(['defaulttests']), 'v8_win64_compile_rel': set(['defaulttests']),
}, },
} }

View File

@ -3,7 +3,7 @@
"main": "run.js", "main": "run.js",
"run_count": 2, "run_count": 2,
"results_regexp": "^%s: (.+)$", "results_regexp": "^%s: (.+)$",
"benchmarks": [ "tests": [
{"name": "Richards"}, {"name": "Richards"},
{"name": "DeltaBlue"}, {"name": "DeltaBlue"},
{"name": "Crypto"}, {"name": "Crypto"},

View File

@ -9,10 +9,12 @@
'type': 'none', 'type': 'none',
'dependencies': [ 'dependencies': [
'../samples/samples.gyp:*', '../samples/samples.gyp:*',
'../src/base/base.gyp:base-unittests',
'../src/compiler/compiler.gyp:compiler-unittests',
'../src/d8.gyp:d8', '../src/d8.gyp:d8',
'../test/base-unittests/base-unittests.gyp:*', '../src/heap/heap.gyp:heap-unittests',
'../src/libplatform/libplatform.gyp:libplatform-unittests',
'../test/cctest/cctest.gyp:*', '../test/cctest/cctest.gyp:*',
'../test/compiler-unittests/compiler-unittests.gyp:*',
], ],
'conditions': [ 'conditions': [
['component!="shared_library"', { ['component!="shared_library"', {

View File

@ -87,7 +87,6 @@
'-pthread', # Not supported by Android toolchain. '-pthread', # Not supported by Android toolchain.
], ],
'cflags': [ 'cflags': [
'-U__linux__', # Don't allow toolchain to claim -D__linux__
'-ffunction-sections', '-ffunction-sections',
'-funwind-tables', '-funwind-tables',
'-fstack-protector', '-fstack-protector',

View File

@ -19,6 +19,7 @@ def main():
print 'Landmines test.' print 'Landmines test.'
print 'Activating MSVS 2013.' print 'Activating MSVS 2013.'
print 'Revert activation of MSVS 2013.' print 'Revert activation of MSVS 2013.'
print 'Activating MSVS 2013 again.'
return 0 return 0

View File

@ -215,9 +215,18 @@
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \ ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd"', { or OS=="netbsd"', {
'target_defaults': { 'target_defaults': {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', 'cflags': [
'-Wno-long-long', '-pthread', '-fno-exceptions', '-Wall',
'-pedantic' ], '<(werror)',
'-W',
'-Wno-unused-parameter',
'-Wno-long-long',
'-pthread',
'-fno-exceptions',
'-pedantic',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ], 'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'ldflags': [ '-pthread', ], 'ldflags': [ '-pthread', ],
'conditions': [ 'conditions': [
@ -234,8 +243,15 @@
# or OS=="netbsd"' # or OS=="netbsd"'
['OS=="qnx"', { ['OS=="qnx"', {
'target_defaults': { 'target_defaults': {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter', 'cflags': [
'-fno-exceptions' ], '-Wall',
'<(werror)',
'-W',
'-Wno-unused-parameter',
'-fno-exceptions',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ], 'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
'conditions': [ 'conditions': [
[ 'visibility=="hidden"', { [ 'visibility=="hidden"', {
@ -263,6 +279,7 @@
'defines': [ 'defines': [
'_CRT_SECURE_NO_DEPRECATE', '_CRT_SECURE_NO_DEPRECATE',
'_CRT_NONSTDC_NO_DEPRECATE', '_CRT_NONSTDC_NO_DEPRECATE',
'_USING_V110_SDK71_',
], ],
'conditions': [ 'conditions': [
['component=="static_library"', { ['component=="static_library"', {
@ -298,6 +315,7 @@
'AdditionalOptions': ['/ignore:4221'], 'AdditionalOptions': ['/ignore:4221'],
}, },
'VCLinkerTool': { 'VCLinkerTool': {
'MinimumRequiredVersion': '5.01', # XP.
'AdditionalDependencies': [ 'AdditionalDependencies': [
'ws2_32.lib', 'ws2_32.lib',
], ],
@ -359,6 +377,8 @@
'-Wendif-labels', '-Wendif-labels',
'-W', '-W',
'-Wno-unused-parameter', '-Wno-unused-parameter',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
], ],
}, },
'conditions': [ 'conditions': [

View File

@ -58,6 +58,14 @@
# Default arch variant for MIPS. # Default arch variant for MIPS.
'mips_arch_variant%': 'r2', 'mips_arch_variant%': 'r2',
# Possible values fp32, fp64, fpxx.
# fp32 - 32 32-bit FPU registers are available, doubles are placed in
# register pairs.
# fp64 - 32 64-bit FPU registers are available.
# fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
# detection
'mips_fpu_mode%': 'fp32',
'v8_enable_backtrace%': 0, 'v8_enable_backtrace%': 0,
# Enable profiling support. Only required on Windows. # Enable profiling support. Only required on Windows.
@ -83,6 +91,9 @@
# Allow to suppress the array bounds warning (default is no suppression). # Allow to suppress the array bounds warning (default is no suppression).
'wno_array_bounds%': '', 'wno_array_bounds%': '',
# Link-Time Optimizations
'use_lto%': 0,
'variables': { 'variables': {
# This is set when building the Android WebView inside the Android build # This is set when building the Android WebView inside the Android build
# system, using the 'android' gyp backend. # system, using the 'android' gyp backend.
@ -233,6 +244,15 @@
}], }],
], ],
}], }],
# Disable LTO for v8
# v8 is optimized for speed, which takes precedence over
# size optimization in LTO.
['use_lto==1', {
'cflags!': [
'-flto',
'-ffat-lto-objects',
],
}],
], ],
}], # _toolset=="target" }], # _toolset=="target"
], ],
@ -272,10 +292,33 @@
'cflags': ['-msoft-float'], 'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'], 'ldflags': ['-msoft-float'],
}], }],
['mips_fpu_mode=="fp64"', {
'cflags': ['-mfp64'],
}],
['mips_fpu_mode=="fpxx"', {
'cflags': ['-mfpxx'],
}],
['mips_fpu_mode=="fp32"', {
'cflags': ['-mfp32'],
}],
['mips_arch_variant=="r6"', {
'cflags!': ['-mfp32'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
'-mips32r6',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}],
['mips_arch_variant=="r2"', { ['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'], 'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}], }],
['mips_arch_variant=="r1"', { ['mips_arch_variant=="r1"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
}],
['mips_arch_variant=="rx"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'], 'cflags': ['-mips32', '-Wa,-mips32'],
}], }],
], ],
@ -297,8 +340,34 @@
'__mips_soft_float=1' '__mips_soft_float=1'
], ],
}], }],
['mips_arch_variant=="rx"', {
'defines': [
'_MIPS_ARCH_MIPS32RX',
'FPU_MODE_FPXX',
],
}],
['mips_arch_variant=="r6"', {
'defines': [
'_MIPS_ARCH_MIPS32R6',
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r2"', { ['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',], 'defines': ['_MIPS_ARCH_MIPS32R2',],
'conditions': [
['mips_fpu_mode=="fp64"', {
'defines': ['FPU_MODE_FP64',],
}],
['mips_fpu_mode=="fpxx"', {
'defines': ['FPU_MODE_FPXX',],
}],
['mips_fpu_mode=="fp32"', {
'defines': ['FPU_MODE_FP32',],
}],
],
}],
['mips_arch_variant=="r1"', {
'defines': ['FPU_MODE_FP32',],
}], }],
], ],
}], # v8_target_arch=="mips" }], # v8_target_arch=="mips"
@ -321,13 +390,37 @@
'cflags': ['-msoft-float'], 'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'], 'ldflags': ['-msoft-float'],
}], }],
['mips_fpu_mode=="fp64"', {
'cflags': ['-mfp64'],
}],
['mips_fpu_mode=="fpxx"', {
'cflags': ['-mfpxx'],
}],
['mips_fpu_mode=="fp32"', {
'cflags': ['-mfp32'],
}],
['mips_arch_variant=="r6"', {
'cflags!': ['-mfp32'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
'-mips32r6',
'-Wl,--dynamic-linker=$(LDSO_PATH)',
'-Wl,--rpath=$(LD_R_PATH)',
],
}],
['mips_arch_variant=="r2"', { ['mips_arch_variant=="r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'], 'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}], }],
['mips_arch_variant=="r1"', { ['mips_arch_variant=="r1"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'],
}],
['mips_arch_variant=="rx"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips32', '-Wa,-mips32'], 'cflags': ['-mips32', '-Wa,-mips32'],
}], }],
['mips_arch_variant=="loongson"', { ['mips_arch_variant=="loongson"', {
'cflags!': ['-mfp64'],
'cflags': ['-mips3', '-Wa,-mips3'], 'cflags': ['-mips3', '-Wa,-mips3'],
}], }],
], ],
@ -349,11 +442,40 @@
'__mips_soft_float=1' '__mips_soft_float=1'
], ],
}], }],
['mips_arch_variant=="rx"', {
'defines': [
'_MIPS_ARCH_MIPS32RX',
'FPU_MODE_FPXX',
],
}],
['mips_arch_variant=="r6"', {
'defines': [
'_MIPS_ARCH_MIPS32R6',
'FPU_MODE_FP64',
],
}],
['mips_arch_variant=="r2"', { ['mips_arch_variant=="r2"', {
'defines': ['_MIPS_ARCH_MIPS32R2',], 'defines': ['_MIPS_ARCH_MIPS32R2',],
'conditions': [
['mips_fpu_mode=="fp64"', {
'defines': ['FPU_MODE_FP64',],
}],
['mips_fpu_mode=="fpxx"', {
'defines': ['FPU_MODE_FPXX',],
}],
['mips_fpu_mode=="fp32"', {
'defines': ['FPU_MODE_FP32',],
}],
],
}],
['mips_arch_variant=="r1"', {
'defines': ['FPU_MODE_FP32',],
}], }],
['mips_arch_variant=="loongson"', { ['mips_arch_variant=="loongson"', {
'defines': ['_MIPS_ARCH_LOONGSON',], 'defines': [
'_MIPS_ARCH_LOONGSON',
'FPU_MODE_FP32',
],
}], }],
], ],
}], # v8_target_arch=="mipsel" }], # v8_target_arch=="mipsel"
@ -499,6 +621,12 @@
'cflags': [ '-m32' ], 'cflags': [ '-m32' ],
'ldflags': [ '-m32' ], 'ldflags': [ '-m32' ],
}], }],
# Enable feedback-directed optimisation when building in android.
[ 'android_webview_build == 1', {
'aosp_build_settings': {
'LOCAL_FDO_SUPPORT': 'true',
},
}],
], ],
'xcode_settings': { 'xcode_settings': {
'ARCHS': [ 'i386' ], 'ARCHS': [ 'i386' ],
@ -523,6 +651,12 @@
'cflags': [ '-m64' ], 'cflags': [ '-m64' ],
'ldflags': [ '-m64' ], 'ldflags': [ '-m64' ],
}], }],
# Enable feedback-directed optimisation when building in android.
[ 'android_webview_build == 1', {
'aosp_build_settings': {
'LOCAL_FDO_SUPPORT': 'true',
},
}],
] ]
}], }],
], ],

View File

@ -167,6 +167,9 @@ class V8_EXPORT Debug {
// happened yet. // happened yet.
static void CancelDebugBreak(Isolate* isolate); static void CancelDebugBreak(Isolate* isolate);
// Check if a debugger break is scheduled in the given isolate.
static bool CheckDebugBreak(Isolate* isolate);
// Break execution of JavaScript in the given isolate (this method // Break execution of JavaScript in the given isolate (this method
// can be invoked from a non-VM thread) for further client command // can be invoked from a non-VM thread) for further client command
// execution on a VM thread. Client data is then passed in // execution on a VM thread. Client data is then passed in

645
deps/v8/include/v8.h vendored
View File

@ -77,6 +77,7 @@ class ImplementationUtilities;
class Int32; class Int32;
class Integer; class Integer;
class Isolate; class Isolate;
class Name;
class Number; class Number;
class NumberObject; class NumberObject;
class Object; class Object;
@ -129,6 +130,7 @@ class Heap;
class HeapObject; class HeapObject;
class Isolate; class Isolate;
class Object; class Object;
struct StreamedSource;
template<typename T> class CustomArguments; template<typename T> class CustomArguments;
class PropertyCallbackArguments; class PropertyCallbackArguments;
class FunctionCallbackArguments; class FunctionCallbackArguments;
@ -1087,6 +1089,73 @@ class V8_EXPORT ScriptCompiler {
CachedData* cached_data; CachedData* cached_data;
}; };
/**
* For streaming incomplete script data to V8. The embedder should implement a
* subclass of this class.
*/
class ExternalSourceStream {
public:
virtual ~ExternalSourceStream() {}
/**
* V8 calls this to request the next chunk of data from the embedder. This
* function will be called on a background thread, so it's OK to block and
* wait for the data, if the embedder doesn't have data yet. Returns the
* length of the data returned. When the data ends, GetMoreData should
* return 0. Caller takes ownership of the data.
*
* When streaming UTF-8 data, V8 handles multi-byte characters split between
* two data chunks, but doesn't handle multi-byte characters split between
* more than two data chunks. The embedder can avoid this problem by always
* returning at least 2 bytes of data.
*
* If the embedder wants to cancel the streaming, they should make the next
* GetMoreData call return 0. V8 will interpret it as end of data (and most
* probably, parsing will fail). The streaming task will return as soon as
* V8 has parsed the data it received so far.
*/
virtual size_t GetMoreData(const uint8_t** src) = 0;
};
/**
* Source code which can be streamed into V8 in pieces. It will be parsed
* while streaming. It can be compiled after the streaming is complete.
* StreamedSource must be kept alive while the streaming task is ran (see
* ScriptStreamingTask below).
*/
class V8_EXPORT StreamedSource {
public:
enum Encoding { ONE_BYTE, TWO_BYTE, UTF8 };
StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
~StreamedSource();
// Ownership of the CachedData or its buffers is *not* transferred to the
// caller. The CachedData object is alive as long as the StreamedSource
// object is alive.
const CachedData* GetCachedData() const;
internal::StreamedSource* impl() const { return impl_; }
private:
// Prevent copying. Not implemented.
StreamedSource(const StreamedSource&);
StreamedSource& operator=(const StreamedSource&);
internal::StreamedSource* impl_;
};
/**
* A streaming task which the embedder must run on a background thread to
* stream scripts into V8. Returned by ScriptCompiler::StartStreamingScript.
*/
class ScriptStreamingTask {
public:
virtual ~ScriptStreamingTask() {}
virtual void Run() = 0;
};
enum CompileOptions { enum CompileOptions {
kNoCompileOptions = 0, kNoCompileOptions = 0,
kProduceParserCache, kProduceParserCache,
@ -1129,6 +1198,32 @@ class V8_EXPORT ScriptCompiler {
static Local<Script> Compile( static Local<Script> Compile(
Isolate* isolate, Source* source, Isolate* isolate, Source* source,
CompileOptions options = kNoCompileOptions); CompileOptions options = kNoCompileOptions);
/**
* Returns a task which streams script data into V8, or NULL if the script
* cannot be streamed. The user is responsible for running the task on a
* background thread and deleting it. When ran, the task starts parsing the
* script, and it will request data from the StreamedSource as needed. When
* ScriptStreamingTask::Run exits, all data has been streamed and the script
* can be compiled (see Compile below).
*
* This API allows to start the streaming with as little data as possible, and
* the remaining data (for example, the ScriptOrigin) is passed to Compile.
*/
static ScriptStreamingTask* StartStreamingScript(
Isolate* isolate, StreamedSource* source,
CompileOptions options = kNoCompileOptions);
/**
* Compiles a streamed script (bound to current context).
*
* This can only be called after the streaming has finished
* (ScriptStreamingTask has been run). V8 doesn't construct the source string
* during streaming, so the embedder needs to pass the full source here.
*/
static Local<Script> Compile(Isolate* isolate, StreamedSource* source,
Handle<String> full_source_string,
const ScriptOrigin& origin);
}; };
@ -1366,6 +1461,12 @@ class V8_EXPORT Value : public Data {
*/ */
bool IsFalse() const; bool IsFalse() const;
/**
* Returns true if this value is a symbol or a string.
* This is an experimental feature.
*/
bool IsName() const;
/** /**
* Returns true if this value is an instance of the String type. * Returns true if this value is an instance of the String type.
* See ECMA-262 8.4. * See ECMA-262 8.4.
@ -1423,6 +1524,11 @@ class V8_EXPORT Value : public Data {
*/ */
bool IsDate() const; bool IsDate() const;
/**
* Returns true if this value is an Arguments object.
*/
bool IsArgumentsObject() const;
/** /**
* Returns true if this value is a Boolean object. * Returns true if this value is a Boolean object.
*/ */
@ -1454,12 +1560,48 @@ class V8_EXPORT Value : public Data {
*/ */
bool IsRegExp() const; bool IsRegExp() const;
/**
* Returns true if this value is a Generator function.
* This is an experimental feature.
*/
bool IsGeneratorFunction() const;
/**
* Returns true if this value is a Generator object (iterator).
* This is an experimental feature.
*/
bool IsGeneratorObject() const;
/** /**
* Returns true if this value is a Promise. * Returns true if this value is a Promise.
* This is an experimental feature. * This is an experimental feature.
*/ */
bool IsPromise() const; bool IsPromise() const;
/**
* Returns true if this value is a Map.
* This is an experimental feature.
*/
bool IsMap() const;
/**
* Returns true if this value is a Set.
* This is an experimental feature.
*/
bool IsSet() const;
/**
* Returns true if this value is a WeakMap.
* This is an experimental feature.
*/
bool IsWeakMap() const;
/**
* Returns true if this value is a WeakSet.
* This is an experimental feature.
*/
bool IsWeakSet() const;
/** /**
* Returns true if this value is an ArrayBuffer. * Returns true if this value is an ArrayBuffer.
* This is an experimental feature. * This is an experimental feature.
@ -1593,15 +1735,26 @@ class V8_EXPORT Boolean : public Primitive {
}; };
/**
* A superclass for symbols and strings.
*/
class V8_EXPORT Name : public Primitive {
public:
V8_INLINE static Name* Cast(v8::Value* obj);
private:
static void CheckCast(v8::Value* obj);
};
/** /**
* A JavaScript string value (ECMA-262, 4.3.17). * A JavaScript string value (ECMA-262, 4.3.17).
*/ */
class V8_EXPORT String : public Primitive { class V8_EXPORT String : public Name {
public: public:
enum Encoding { enum Encoding {
UNKNOWN_ENCODING = 0x1, UNKNOWN_ENCODING = 0x1,
TWO_BYTE_ENCODING = 0x0, TWO_BYTE_ENCODING = 0x0,
ASCII_ENCODING = 0x4, ASCII_ENCODING = 0x4, // TODO(yangguo): deprecate this.
ONE_BYTE_ENCODING = 0x4 ONE_BYTE_ENCODING = 0x4
}; };
/** /**
@ -1657,7 +1810,8 @@ class V8_EXPORT String : public Primitive {
NO_OPTIONS = 0, NO_OPTIONS = 0,
HINT_MANY_WRITES_EXPECTED = 1, HINT_MANY_WRITES_EXPECTED = 1,
NO_NULL_TERMINATION = 2, NO_NULL_TERMINATION = 2,
PRESERVE_ASCII_NULL = 4, PRESERVE_ASCII_NULL = 4, // TODO(yangguo): deprecate this.
PRESERVE_ONE_BYTE_NULL = 4,
// Used by WriteUtf8 to replace orphan surrogate code units with the // Used by WriteUtf8 to replace orphan surrogate code units with the
// unicode replacement character. Needs to be set to guarantee valid UTF-8 // unicode replacement character. Needs to be set to guarantee valid UTF-8
// output. // output.
@ -1691,9 +1845,12 @@ class V8_EXPORT String : public Primitive {
bool IsExternal() const; bool IsExternal() const;
/** /**
* Returns true if the string is both external and ASCII * Returns true if the string is both external and one-byte.
*/ */
bool IsExternalAscii() const; bool IsExternalOneByte() const;
// TODO(yangguo): deprecate this.
bool IsExternalAscii() const { return IsExternalOneByte(); }
class V8_EXPORT ExternalStringResourceBase { // NOLINT class V8_EXPORT ExternalStringResourceBase { // NOLINT
public: public:
@ -1748,33 +1905,32 @@ class V8_EXPORT String : public Primitive {
}; };
/** /**
* An ExternalAsciiStringResource is a wrapper around an ASCII * An ExternalOneByteStringResource is a wrapper around an one-byte
* string buffer that resides outside V8's heap. Implement an * string buffer that resides outside V8's heap. Implement an
* ExternalAsciiStringResource to manage the life cycle of the * ExternalOneByteStringResource to manage the life cycle of the
* underlying buffer. Note that the string data must be immutable * underlying buffer. Note that the string data must be immutable
* and that the data must be strict (7-bit) ASCII, not Latin-1 or * and that the data must be Latin-1 and not UTF-8, which would require
* UTF-8, which would require special treatment internally in the * special treatment internally in the engine and do not allow efficient
* engine and, in the case of UTF-8, do not allow efficient indexing. * indexing. Use String::New or convert to 16 bit data for non-Latin1.
* Use String::New or convert to 16 bit data for non-ASCII.
*/ */
class V8_EXPORT ExternalAsciiStringResource class V8_EXPORT ExternalOneByteStringResource
: public ExternalStringResourceBase { : public ExternalStringResourceBase {
public: public:
/** /**
* Override the destructor to manage the life cycle of the underlying * Override the destructor to manage the life cycle of the underlying
* buffer. * buffer.
*/ */
virtual ~ExternalAsciiStringResource() {} virtual ~ExternalOneByteStringResource() {}
/** The string data from the underlying buffer.*/ /** The string data from the underlying buffer.*/
virtual const char* data() const = 0; virtual const char* data() const = 0;
/** The number of ASCII characters in the string.*/ /** The number of Latin-1 characters in the string.*/
virtual size_t length() const = 0; virtual size_t length() const = 0;
protected: protected:
ExternalAsciiStringResource() {} ExternalOneByteStringResource() {}
}; };
typedef ExternalAsciiStringResource ExternalOneByteStringResource; typedef ExternalOneByteStringResource ExternalAsciiStringResource;
/** /**
* If the string is an external string, return the ExternalStringResourceBase * If the string is an external string, return the ExternalStringResourceBase
@ -1791,10 +1947,15 @@ class V8_EXPORT String : public Primitive {
V8_INLINE ExternalStringResource* GetExternalStringResource() const; V8_INLINE ExternalStringResource* GetExternalStringResource() const;
/** /**
* Get the ExternalAsciiStringResource for an external ASCII string. * Get the ExternalOneByteStringResource for an external one-byte string.
* Returns NULL if IsExternalAscii() doesn't return true. * Returns NULL if IsExternalOneByte() doesn't return true.
*/ */
const ExternalAsciiStringResource* GetExternalAsciiStringResource() const; const ExternalOneByteStringResource* GetExternalOneByteStringResource() const;
// TODO(yangguo): deprecate this.
const ExternalAsciiStringResource* GetExternalAsciiStringResource() const {
return GetExternalOneByteStringResource();
}
V8_INLINE static String* Cast(v8::Value* obj); V8_INLINE static String* Cast(v8::Value* obj);
@ -1851,7 +2012,7 @@ class V8_EXPORT String : public Primitive {
bool MakeExternal(ExternalStringResource* resource); bool MakeExternal(ExternalStringResource* resource);
/** /**
* Creates a new external string using the ASCII data defined in the given * Creates a new external string using the one-byte data defined in the given
* resource. When the external string is no longer live on V8's heap the * resource. When the external string is no longer live on V8's heap the
* resource will be disposed by calling its Dispose method. The caller of * resource will be disposed by calling its Dispose method. The caller of
* this function should not otherwise delete or modify the resource. Neither * this function should not otherwise delete or modify the resource. Neither
@ -1859,7 +2020,7 @@ class V8_EXPORT String : public Primitive {
* destructor of the external string resource. * destructor of the external string resource.
*/ */
static Local<String> NewExternal(Isolate* isolate, static Local<String> NewExternal(Isolate* isolate,
ExternalAsciiStringResource* resource); ExternalOneByteStringResource* resource);
/** /**
* Associate an external string resource with this string by transforming it * Associate an external string resource with this string by transforming it
@ -1870,7 +2031,7 @@ class V8_EXPORT String : public Primitive {
* The string is not modified if the operation fails. See NewExternal for * The string is not modified if the operation fails. See NewExternal for
* information on the lifetime of the resource. * information on the lifetime of the resource.
*/ */
bool MakeExternal(ExternalAsciiStringResource* resource); bool MakeExternal(ExternalOneByteStringResource* resource);
/** /**
* Returns true if this string can be made external. * Returns true if this string can be made external.
@ -1935,7 +2096,7 @@ class V8_EXPORT String : public Primitive {
* *
* This is an experimental feature. Use at your own risk. * This is an experimental feature. Use at your own risk.
*/ */
class V8_EXPORT Symbol : public Primitive { class V8_EXPORT Symbol : public Name {
public: public:
// Returns the print name string of the symbol, or undefined if none. // Returns the print name string of the symbol, or undefined if none.
Local<Value> Name() const; Local<Value> Name() const;
@ -1955,7 +2116,12 @@ class V8_EXPORT Symbol : public Primitive {
// registry that is not accessible by (and cannot clash with) JavaScript code. // registry that is not accessible by (and cannot clash with) JavaScript code.
static Local<Symbol> ForApi(Isolate *isolate, Local<String> name); static Local<Symbol> ForApi(Isolate *isolate, Local<String> name);
// Well-known symbols
static Local<Symbol> GetIterator(Isolate* isolate);
static Local<Symbol> GetUnscopables(Isolate* isolate);
V8_INLINE static Symbol* Cast(v8::Value* obj); V8_INLINE static Symbol* Cast(v8::Value* obj);
private: private:
Symbol(); Symbol();
static void CheckCast(v8::Value* obj); static void CheckCast(v8::Value* obj);
@ -2079,12 +2245,19 @@ enum ExternalArrayType {
typedef void (*AccessorGetterCallback)( typedef void (*AccessorGetterCallback)(
Local<String> property, Local<String> property,
const PropertyCallbackInfo<Value>& info); const PropertyCallbackInfo<Value>& info);
typedef void (*AccessorNameGetterCallback)(
Local<Name> property,
const PropertyCallbackInfo<Value>& info);
typedef void (*AccessorSetterCallback)( typedef void (*AccessorSetterCallback)(
Local<String> property, Local<String> property,
Local<Value> value, Local<Value> value,
const PropertyCallbackInfo<void>& info); const PropertyCallbackInfo<void>& info);
typedef void (*AccessorNameSetterCallback)(
Local<Name> property,
Local<Value> value,
const PropertyCallbackInfo<void>& info);
/** /**
@ -2159,14 +2332,20 @@ class V8_EXPORT Object : public Value {
Handle<Value> data = Handle<Value>(), Handle<Value> data = Handle<Value>(),
AccessControl settings = DEFAULT, AccessControl settings = DEFAULT,
PropertyAttribute attribute = None); PropertyAttribute attribute = None);
bool SetAccessor(Handle<Name> name,
AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter = 0,
Handle<Value> data = Handle<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None);
// This function is not yet stable and should not be used at this time. // This function is not yet stable and should not be used at this time.
bool SetDeclaredAccessor(Local<String> name, bool SetDeclaredAccessor(Local<Name> name,
Local<DeclaredAccessorDescriptor> descriptor, Local<DeclaredAccessorDescriptor> descriptor,
PropertyAttribute attribute = None, PropertyAttribute attribute = None,
AccessControl settings = DEFAULT); AccessControl settings = DEFAULT);
void SetAccessorProperty(Local<String> name, void SetAccessorProperty(Local<Name> name,
Local<Function> getter, Local<Function> getter,
Handle<Function> setter = Handle<Function>(), Handle<Function> setter = Handle<Function>(),
PropertyAttribute attribute = None, PropertyAttribute attribute = None,
@ -3168,12 +3347,12 @@ class V8_EXPORT External : public Value {
class V8_EXPORT Template : public Data { class V8_EXPORT Template : public Data {
public: public:
/** Adds a property to each instance created by this template.*/ /** Adds a property to each instance created by this template.*/
void Set(Handle<String> name, Handle<Data> value, void Set(Handle<Name> name, Handle<Data> value,
PropertyAttribute attributes = None); PropertyAttribute attributes = None);
V8_INLINE void Set(Isolate* isolate, const char* name, Handle<Data> value); V8_INLINE void Set(Isolate* isolate, const char* name, Handle<Data> value);
void SetAccessorProperty( void SetAccessorProperty(
Local<String> name, Local<Name> name,
Local<FunctionTemplate> getter = Local<FunctionTemplate>(), Local<FunctionTemplate> getter = Local<FunctionTemplate>(),
Local<FunctionTemplate> setter = Local<FunctionTemplate>(), Local<FunctionTemplate> setter = Local<FunctionTemplate>(),
PropertyAttribute attribute = None, PropertyAttribute attribute = None,
@ -3215,9 +3394,18 @@ class V8_EXPORT Template : public Data {
Local<AccessorSignature> signature = Local<AccessorSignature> signature =
Local<AccessorSignature>(), Local<AccessorSignature>(),
AccessControl settings = DEFAULT); AccessControl settings = DEFAULT);
void SetNativeDataProperty(Local<Name> name,
AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter = 0,
// TODO(dcarney): gcc can't handle Local below
Handle<Value> data = Handle<Value>(),
PropertyAttribute attribute = None,
Local<AccessorSignature> signature =
Local<AccessorSignature>(),
AccessControl settings = DEFAULT);
// This function is not yet stable and should not be used at this time. // This function is not yet stable and should not be used at this time.
bool SetDeclaredAccessor(Local<String> name, bool SetDeclaredAccessor(Local<Name> name,
Local<DeclaredAccessorDescriptor> descriptor, Local<DeclaredAccessorDescriptor> descriptor,
PropertyAttribute attribute = None, PropertyAttribute attribute = None,
Local<AccessorSignature> signature = Local<AccessorSignature> signature =
@ -3584,12 +3772,20 @@ class V8_EXPORT ObjectTemplate : public Template {
PropertyAttribute attribute = None, PropertyAttribute attribute = None,
Handle<AccessorSignature> signature = Handle<AccessorSignature> signature =
Handle<AccessorSignature>()); Handle<AccessorSignature>());
void SetAccessor(Handle<Name> name,
AccessorNameGetterCallback getter,
AccessorNameSetterCallback setter = 0,
Handle<Value> data = Handle<Value>(),
AccessControl settings = DEFAULT,
PropertyAttribute attribute = None,
Handle<AccessorSignature> signature =
Handle<AccessorSignature>());
/** /**
* Sets a named property handler on the object template. * Sets a named property handler on the object template.
* *
* Whenever a named property is accessed on objects created from * Whenever a property whose name is a string is accessed on objects created
* this object template, the provided callback is invoked instead of * from this object template, the provided callback is invoked instead of
* accessing the property directly on the JavaScript object. * accessing the property directly on the JavaScript object.
* *
* \param getter The callback to invoke when getting a property. * \param getter The callback to invoke when getting a property.
@ -3792,11 +3988,11 @@ class V8_EXPORT TypeSwitch : public Data {
// --- Extensions --- // --- Extensions ---
class V8_EXPORT ExternalAsciiStringResourceImpl class V8_EXPORT ExternalOneByteStringResourceImpl
: public String::ExternalAsciiStringResource { : public String::ExternalOneByteStringResource {
public: public:
ExternalAsciiStringResourceImpl() : data_(0), length_(0) {} ExternalOneByteStringResourceImpl() : data_(0), length_(0) {}
ExternalAsciiStringResourceImpl(const char* data, size_t length) ExternalOneByteStringResourceImpl(const char* data, size_t length)
: data_(data), length_(length) {} : data_(data), length_(length) {}
const char* data() const { return data_; } const char* data() const { return data_; }
size_t length() const { return length_; } size_t length() const { return length_; }
@ -3826,7 +4022,7 @@ class V8_EXPORT Extension { // NOLINT
const char* name() const { return name_; } const char* name() const { return name_; }
size_t source_length() const { return source_length_; } size_t source_length() const { return source_length_; }
const String::ExternalAsciiStringResource* source() const { const String::ExternalOneByteStringResource* source() const {
return &source_; } return &source_; }
int dependency_count() { return dep_count_; } int dependency_count() { return dep_count_; }
const char** dependencies() { return deps_; } const char** dependencies() { return deps_; }
@ -3836,7 +4032,7 @@ class V8_EXPORT Extension { // NOLINT
private: private:
const char* name_; const char* name_;
size_t source_length_; // expected to initialize before source_ size_t source_length_; // expected to initialize before source_
ExternalAsciiStringResourceImpl source_; ExternalOneByteStringResourceImpl source_;
int dep_count_; int dep_count_;
const char** deps_; const char** deps_;
bool auto_enable_; bool auto_enable_;
@ -3915,13 +4111,6 @@ class V8_EXPORT ResourceConstraints {
}; };
/**
* Sets the given ResourceConstraints on the given Isolate.
*/
bool V8_EXPORT SetResourceConstraints(Isolate* isolate,
ResourceConstraints* constraints);
// --- Exceptions --- // --- Exceptions ---
@ -4059,17 +4248,149 @@ class V8_EXPORT HeapStatistics {
class RetainedObjectInfo; class RetainedObjectInfo;
/** /**
* Isolate represents an isolated instance of the V8 engine. V8 * FunctionEntryHook is the type of the profile entry hook called at entry to
* isolates have completely separate states. Objects from one isolate * any generated function when function-level profiling is enabled.
* must not be used in other isolates. When V8 is initialized a *
* default isolate is implicitly created and entered. The embedder * \param function the address of the function that's being entered.
* can create additional isolates and use them in parallel in multiple * \param return_addr_location points to a location on stack where the machine
* threads. An isolate can be entered by at most one thread at any * return address resides. This can be used to identify the caller of
* given time. The Locker/Unlocker API must be used to synchronize. * \p function, and/or modified to divert execution when \p function exits.
*
* \note the entry hook must not cause garbage collection.
*/
typedef void (*FunctionEntryHook)(uintptr_t function,
uintptr_t return_addr_location);
/**
* A JIT code event is issued each time code is added, moved or removed.
*
* \note removal events are not currently issued.
*/
struct JitCodeEvent {
enum EventType {
CODE_ADDED,
CODE_MOVED,
CODE_REMOVED,
CODE_ADD_LINE_POS_INFO,
CODE_START_LINE_INFO_RECORDING,
CODE_END_LINE_INFO_RECORDING
};
// Definition of the code position type. The "POSITION" type means the place
// in the source code which are of interest when making stack traces to
// pin-point the source location of a stack frame as close as possible.
// The "STATEMENT_POSITION" means the place at the beginning of each
// statement, and is used to indicate possible break locations.
enum PositionType { POSITION, STATEMENT_POSITION };
// Type of event.
EventType type;
// Start of the instructions.
void* code_start;
// Size of the instructions.
size_t code_len;
// Script info for CODE_ADDED event.
Handle<UnboundScript> script;
// User-defined data for *_LINE_INFO_* event. It's used to hold the source
// code line information which is returned from the
// CODE_START_LINE_INFO_RECORDING event. And it's passed to subsequent
// CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
void* user_data;
struct name_t {
// Name of the object associated with the code, note that the string is not
// zero-terminated.
const char* str;
// Number of chars in str.
size_t len;
};
struct line_info_t {
// PC offset
size_t offset;
// Code postion
size_t pos;
// The position type.
PositionType position_type;
};
union {
// Only valid for CODE_ADDED.
struct name_t name;
// Only valid for CODE_ADD_LINE_POS_INFO
struct line_info_t line_info;
// New location of instructions. Only valid for CODE_MOVED.
void* new_code_start;
};
};
/**
* Option flags passed to the SetJitCodeEventHandler function.
*/
enum JitCodeEventOptions {
kJitCodeEventDefault = 0,
// Generate callbacks for already existent code.
kJitCodeEventEnumExisting = 1
};
/**
* Callback function passed to SetJitCodeEventHandler.
*
* \param event code add, move or removal event.
*/
typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
/**
* Isolate represents an isolated instance of the V8 engine. V8 isolates have
* completely separate states. Objects from one isolate must not be used in
* other isolates. The embedder can create multiple isolates and use them in
* parallel in multiple threads. An isolate can be entered by at most one
* thread at any given time. The Locker/Unlocker API must be used to
* synchronize.
*/ */
class V8_EXPORT Isolate { class V8_EXPORT Isolate {
public: public:
/**
* Initial configuration parameters for a new Isolate.
*/
struct CreateParams {
CreateParams()
: entry_hook(NULL),
code_event_handler(NULL),
enable_serializer(false) {}
/**
* The optional entry_hook allows the host application to provide the
* address of a function that's invoked on entry to every V8-generated
* function. Note that entry_hook is invoked at the very start of each
* generated function. Furthermore, if an entry_hook is given, V8 will
* always run without a context snapshot.
*/
FunctionEntryHook entry_hook;
/**
* Allows the host application to provide the address of a function that is
* notified each time code is added, moved or removed.
*/
JitCodeEventHandler code_event_handler;
/**
* ResourceConstraints to use for the new Isolate.
*/
ResourceConstraints constraints;
/**
* This flag currently renders the Isolate unusable.
*/
bool enable_serializer;
};
/** /**
* Stack-allocated class which sets the isolate for all operations * Stack-allocated class which sets the isolate for all operations
* executed within a local scope. * executed within a local scope.
@ -4177,8 +4498,10 @@ class V8_EXPORT Isolate {
* *
* When an isolate is no longer used its resources should be freed * When an isolate is no longer used its resources should be freed
* by calling Dispose(). Using the delete operator is not allowed. * by calling Dispose(). Using the delete operator is not allowed.
*
* V8::Initialize() must have run prior to this.
*/ */
static Isolate* New(); static Isolate* New(const CreateParams& params = CreateParams());
/** /**
* Returns the entered isolate for the current thread or NULL in * Returns the entered isolate for the current thread or NULL in
@ -4488,6 +4811,54 @@ class V8_EXPORT Isolate {
*/ */
int ContextDisposedNotification(); int ContextDisposedNotification();
/**
* Allows the host application to provide the address of a function that is
* notified each time code is added, moved or removed.
*
* \param options options for the JIT code event handler.
* \param event_handler the JIT code event handler, which will be invoked
* each time code is added, moved or removed.
* \note \p event_handler won't get notified of existent code.
* \note since code removal notifications are not currently issued, the
* \p event_handler may get notifications of code that overlaps earlier
* code notifications. This happens when code areas are reused, and the
* earlier overlapping code areas should therefore be discarded.
* \note the events passed to \p event_handler and the strings they point to
* are not guaranteed to live past each call. The \p event_handler must
* copy strings and other parameters it needs to keep around.
* \note the set of events declared in JitCodeEvent::EventType is expected to
* grow over time, and the JitCodeEvent structure is expected to accrue
* new members. The \p event_handler function must ignore event codes
* it does not recognize to maintain future compatibility.
* \note Use Isolate::CreateParams to get events for code executed during
* Isolate setup.
*/
void SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler);
/**
* Modifies the stack limit for this Isolate.
*
* \param stack_limit An address beyond which the Vm's stack may not grow.
*
* \note If you are using threads then you should hold the V8::Locker lock
* while setting the stack limit and you must set a non-default stack
* limit separately for each thread.
*/
void SetStackLimit(uintptr_t stack_limit);
/**
* Returns a memory range that can potentially contain jitted code.
*
* On Win64, embedders are advised to install function table callbacks for
* these ranges, as default SEH won't be able to unwind through jitted code.
*
* Might be empty on other platforms.
*
* https://code.google.com/p/v8/issues/detail?id=3598
*/
void GetCodeRange(void** start, size_t* length_in_bytes);
private: private:
template<class K, class V, class Traits> friend class PersistentValueMap; template<class K, class V, class Traits> friend class PersistentValueMap;
@ -4566,106 +4937,6 @@ typedef uintptr_t (*ReturnAddressLocationResolver)(
uintptr_t return_addr_location); uintptr_t return_addr_location);
/**
* FunctionEntryHook is the type of the profile entry hook called at entry to
* any generated function when function-level profiling is enabled.
*
* \param function the address of the function that's being entered.
* \param return_addr_location points to a location on stack where the machine
* return address resides. This can be used to identify the caller of
* \p function, and/or modified to divert execution when \p function exits.
*
* \note the entry hook must not cause garbage collection.
*/
typedef void (*FunctionEntryHook)(uintptr_t function,
uintptr_t return_addr_location);
/**
* A JIT code event is issued each time code is added, moved or removed.
*
* \note removal events are not currently issued.
*/
struct JitCodeEvent {
enum EventType {
CODE_ADDED,
CODE_MOVED,
CODE_REMOVED,
CODE_ADD_LINE_POS_INFO,
CODE_START_LINE_INFO_RECORDING,
CODE_END_LINE_INFO_RECORDING
};
// Definition of the code position type. The "POSITION" type means the place
// in the source code which are of interest when making stack traces to
// pin-point the source location of a stack frame as close as possible.
// The "STATEMENT_POSITION" means the place at the beginning of each
// statement, and is used to indicate possible break locations.
enum PositionType {
POSITION,
STATEMENT_POSITION
};
// Type of event.
EventType type;
// Start of the instructions.
void* code_start;
// Size of the instructions.
size_t code_len;
// Script info for CODE_ADDED event.
Handle<UnboundScript> script;
// User-defined data for *_LINE_INFO_* event. It's used to hold the source
// code line information which is returned from the
// CODE_START_LINE_INFO_RECORDING event. And it's passed to subsequent
// CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
void* user_data;
struct name_t {
// Name of the object associated with the code, note that the string is not
// zero-terminated.
const char* str;
// Number of chars in str.
size_t len;
};
struct line_info_t {
// PC offset
size_t offset;
// Code postion
size_t pos;
// The position type.
PositionType position_type;
};
union {
// Only valid for CODE_ADDED.
struct name_t name;
// Only valid for CODE_ADD_LINE_POS_INFO
struct line_info_t line_info;
// New location of instructions. Only valid for CODE_MOVED.
void* new_code_start;
};
};
/**
* Option flags passed to the SetJitCodeEventHandler function.
*/
enum JitCodeEventOptions {
kJitCodeEventDefault = 0,
// Generate callbacks for already existent code.
kJitCodeEventEnumExisting = 1
};
/**
* Callback function passed to SetJitCodeEventHandler.
*
* \param event code add, move or removal event.
*/
typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
/** /**
* Interface for iterating through all external resources in the heap. * Interface for iterating through all external resources in the heap.
*/ */
@ -4854,9 +5125,8 @@ class V8_EXPORT V8 {
static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback); static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
/** /**
* Initializes from snapshot if possible. Otherwise, attempts to * Initializes V8. This function needs to be called before the first Isolate
* initialize from scratch. This function is called implicitly if * is created. It always returns true.
* you use the API without calling it first.
*/ */
static bool Initialize(); static bool Initialize();
@ -4873,45 +5143,6 @@ class V8_EXPORT V8 {
static void SetReturnAddressLocationResolver( static void SetReturnAddressLocationResolver(
ReturnAddressLocationResolver return_address_resolver); ReturnAddressLocationResolver return_address_resolver);
/**
* Allows the host application to provide the address of a function that's
* invoked on entry to every V8-generated function.
* Note that \p entry_hook is invoked at the very start of each
* generated function.
*
* \param isolate the isolate to operate on.
* \param entry_hook a function that will be invoked on entry to every
* V8-generated function.
* \returns true on success on supported platforms, false on failure.
* \note Setting an entry hook can only be done very early in an isolates
* lifetime, and once set, the entry hook cannot be revoked.
*/
static bool SetFunctionEntryHook(Isolate* isolate,
FunctionEntryHook entry_hook);
/**
* Allows the host application to provide the address of a function that is
* notified each time code is added, moved or removed.
*
* \param options options for the JIT code event handler.
* \param event_handler the JIT code event handler, which will be invoked
* each time code is added, moved or removed.
* \note \p event_handler won't get notified of existent code.
* \note since code removal notifications are not currently issued, the
* \p event_handler may get notifications of code that overlaps earlier
* code notifications. This happens when code areas are reused, and the
* earlier overlapping code areas should therefore be discarded.
* \note the events passed to \p event_handler and the strings they point to
* are not guaranteed to live past each call. The \p event_handler must
* copy strings and other parameters it needs to keep around.
* \note the set of events declared in JitCodeEvent::EventType is expected to
* grow over time, and the JitCodeEvent structure is expected to accrue
* new members. The \p event_handler function must ignore event codes
* it does not recognize to maintain future compatibility.
*/
static void SetJitCodeEventHandler(JitCodeEventOptions options,
JitCodeEventHandler event_handler);
/** /**
* Forcefully terminate the current thread of JavaScript execution * Forcefully terminate the current thread of JavaScript execution
* in the given isolate. * in the given isolate.
@ -5517,15 +5748,16 @@ template <size_t ptr_size> struct SmiTagging;
template<int kSmiShiftSize> template<int kSmiShiftSize>
V8_INLINE internal::Object* IntToSmi(int value) { V8_INLINE internal::Object* IntToSmi(int value) {
int smi_shift_bits = kSmiTagSize + kSmiShiftSize; int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
intptr_t tagged_value = uintptr_t tagged_value =
(static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag; (static_cast<uintptr_t>(value) << smi_shift_bits) | kSmiTag;
return reinterpret_cast<internal::Object*>(tagged_value); return reinterpret_cast<internal::Object*>(tagged_value);
} }
// Smi constants for 32-bit systems. // Smi constants for 32-bit systems.
template <> struct SmiTagging<4> { template <> struct SmiTagging<4> {
static const int kSmiShiftSize = 0; enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
static const int kSmiValueSize = 31; static int SmiShiftSize() { return kSmiShiftSize; }
static int SmiValueSize() { return kSmiValueSize; }
V8_INLINE static int SmiToInt(const internal::Object* value) { V8_INLINE static int SmiToInt(const internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize; int shift_bits = kSmiTagSize + kSmiShiftSize;
// Throw away top 32 bits and shift down (requires >> to be sign extending). // Throw away top 32 bits and shift down (requires >> to be sign extending).
@ -5552,8 +5784,9 @@ template <> struct SmiTagging<4> {
// Smi constants for 64-bit systems. // Smi constants for 64-bit systems.
template <> struct SmiTagging<8> { template <> struct SmiTagging<8> {
static const int kSmiShiftSize = 31; enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
static const int kSmiValueSize = 32; static int SmiShiftSize() { return kSmiShiftSize; }
static int SmiValueSize() { return kSmiValueSize; }
V8_INLINE static int SmiToInt(const internal::Object* value) { V8_INLINE static int SmiToInt(const internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize; int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits. // Shift down and throw away top 32 bits.
@ -5597,7 +5830,7 @@ class Internals {
static const int kFullStringRepresentationMask = 0x07; static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4; static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02; static const int kExternalTwoByteRepresentationTag = 0x02;
static const int kExternalAsciiRepresentationTag = 0x06; static const int kExternalOneByteRepresentationTag = 0x06;
static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize; static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
static const int kAmountOfExternalAllocatedMemoryOffset = static const int kAmountOfExternalAllocatedMemoryOffset =
@ -5686,7 +5919,7 @@ class Internals {
V8_INLINE static void UpdateNodeFlag(internal::Object** obj, V8_INLINE static void UpdateNodeFlag(internal::Object** obj,
bool value, int shift) { bool value, int shift) {
uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset; uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
uint8_t mask = static_cast<uint8_t>(1 << shift); uint8_t mask = static_cast<uint8_t>(1U << shift);
*addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift)); *addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
} }
@ -6268,7 +6501,7 @@ String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask; int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
*encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask); *encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
ExternalStringResourceBase* resource = NULL; ExternalStringResourceBase* resource = NULL;
if (type == I::kExternalAsciiRepresentationTag || if (type == I::kExternalOneByteRepresentationTag ||
type == I::kExternalTwoByteRepresentationTag) { type == I::kExternalTwoByteRepresentationTag) {
void* value = I::ReadField<void*>(obj, I::kStringResourceOffset); void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
resource = static_cast<ExternalStringResourceBase*>(value); resource = static_cast<ExternalStringResourceBase*>(value);
@ -6338,6 +6571,14 @@ template <class T> Value* Value::Cast(T* value) {
} }
Name* Name::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
#endif
return static_cast<Name*>(value);
}
Symbol* Symbol::Cast(v8::Value* value) { Symbol* Symbol::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS #ifdef V8_ENABLE_CHECKS
CheckCast(value); CheckCast(value);

View File

@ -175,7 +175,12 @@
// V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported // V8_HAS_ATTRIBUTE_VISIBILITY - __attribute__((visibility)) supported
// V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result)) // V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
// supported // supported
// V8_HAS_BUILTIN_CLZ - __builtin_clz() supported
// V8_HAS_BUILTIN_CTZ - __builtin_ctz() supported
// V8_HAS_BUILTIN_EXPECT - __builtin_expect() supported // V8_HAS_BUILTIN_EXPECT - __builtin_expect() supported
// V8_HAS_BUILTIN_POPCOUNT - __builtin_popcount() supported
// V8_HAS_BUILTIN_SADD_OVERFLOW - __builtin_sadd_overflow() supported
// V8_HAS_BUILTIN_SSUB_OVERFLOW - __builtin_ssub_overflow() supported
// V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported // V8_HAS_DECLSPEC_ALIGN - __declspec(align(n)) supported
// V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported // V8_HAS_DECLSPEC_DEPRECATED - __declspec(deprecated) supported
// V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported // V8_HAS_DECLSPEC_NOINLINE - __declspec(noinline) supported
@ -206,7 +211,12 @@
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \ # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result)) (__has_attribute(warn_unused_result))
# define V8_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
# define V8_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
# define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect)) # define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
# define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
# define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas)) # define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
# define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert)) # define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
@ -238,7 +248,10 @@
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \ # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0)) (!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
# define V8_HAS_BUILTIN_CLZ (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_BUILTIN_CTZ (V8_GNUC_PREREQ(3, 4, 0))
# define V8_HAS_BUILTIN_EXPECT (V8_GNUC_PREREQ(2, 96, 0)) # define V8_HAS_BUILTIN_EXPECT (V8_GNUC_PREREQ(2, 96, 0))
# define V8_HAS_BUILTIN_POPCOUNT (V8_GNUC_PREREQ(3, 4, 0))
// g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality // g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality
// without warnings (functionality used by the macros below). These modes // without warnings (functionality used by the macros below). These modes
@ -321,24 +334,6 @@ declarator __attribute__((deprecated))
#endif #endif
// A macro to mark variables or types as unused, avoiding compiler warnings.
#if V8_HAS_ATTRIBUTE_UNUSED
# define V8_UNUSED __attribute__((unused))
#else
# define V8_UNUSED
#endif
// Annotate a function indicating the caller must examine the return value.
// Use like:
// int foo() V8_WARN_UNUSED_RESULT;
#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
# define V8_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
# define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
#endif
// A macro to provide the compiler with branch prediction information. // A macro to provide the compiler with branch prediction information.
#if V8_HAS_BUILTIN_EXPECT #if V8_HAS_BUILTIN_EXPECT
# define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0)) # define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
@ -369,33 +364,6 @@ declarator __attribute__((deprecated))
#endif #endif
// Annotate a virtual method indicating it must be overriding a virtual
// method in the parent class.
// Use like:
// virtual void bar() V8_OVERRIDE;
#if V8_HAS_CXX11_OVERRIDE
# define V8_OVERRIDE override
#else
# define V8_OVERRIDE /* NOT SUPPORTED */
#endif
// Annotate a virtual method indicating that subclasses must not override it,
// or annotate a class to indicate that it cannot be subclassed.
// Use like:
// class B V8_FINAL : public A {};
// virtual void bar() V8_FINAL;
#if V8_HAS_CXX11_FINAL
# define V8_FINAL final
#elif V8_HAS___FINAL
# define V8_FINAL __final
#elif V8_HAS_SEALED
# define V8_FINAL sealed
#else
# define V8_FINAL /* NOT SUPPORTED */
#endif
// This macro allows to specify memory alignment for structs, classes, etc. // This macro allows to specify memory alignment for structs, classes, etc.
// Use like: // Use like:
// class V8_ALIGNED(16) MyClass { ... }; // class V8_ALIGNED(16) MyClass { ... };

View File

@ -257,6 +257,7 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICU(); v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform(); v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform); v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
int result = RunMain(argc, argv); int result = RunMain(argc, argv);
v8::V8::Dispose(); v8::V8::Dispose();
v8::V8::ShutdownPlatform(); v8::V8::ShutdownPlatform();

View File

@ -648,6 +648,7 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICU(); v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform(); v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform); v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
map<string, string> options; map<string, string> options;
string file; string file;
ParseOptions(argc, argv, &options, &file); ParseOptions(argc, argv, &options, &file);

View File

@ -83,6 +83,7 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICU(); v8::V8::InitializeICU();
v8::Platform* platform = v8::platform::CreateDefaultPlatform(); v8::Platform* platform = v8::platform::CreateDefaultPlatform();
v8::V8::InitializePlatform(platform); v8::V8::InitializePlatform(platform);
v8::V8::Initialize();
v8::V8::SetFlagsFromCommandLine(&argc, argv, true); v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
ShellArrayBufferAllocator array_buffer_allocator; ShellArrayBufferAllocator array_buffer_allocator;
v8::V8::SetArrayBufferAllocator(&array_buffer_allocator); v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);

1
deps/v8/src/DEPS vendored
View File

@ -4,6 +4,7 @@ include_rules = [
"+src/compiler/pipeline.h", "+src/compiler/pipeline.h",
"-src/libplatform", "-src/libplatform",
"-include/libplatform", "-include/libplatform",
"+testing",
] ]
specific_include_rules = { specific_include_rules = {

View File

@ -23,9 +23,9 @@ namespace internal {
Handle<AccessorInfo> Accessors::MakeAccessor( Handle<AccessorInfo> Accessors::MakeAccessor(
Isolate* isolate, Isolate* isolate,
Handle<String> name, Handle<Name> name,
AccessorGetterCallback getter, AccessorNameGetterCallback getter,
AccessorSetterCallback setter, AccessorNameSetterCallback setter,
PropertyAttributes attributes) { PropertyAttributes attributes) {
Factory* factory = isolate->factory(); Factory* factory = isolate->factory();
Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo(); Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
@ -138,7 +138,7 @@ bool Accessors::IsJSObjectFieldAccessor<HeapType>(Handle<HeapType> type,
bool SetPropertyOnInstanceIfInherited( bool SetPropertyOnInstanceIfInherited(
Isolate* isolate, const v8::PropertyCallbackInfo<void>& info, Isolate* isolate, const v8::PropertyCallbackInfo<void>& info,
v8::Local<v8::String> name, Handle<Object> value) { v8::Local<v8::Name> name, Handle<Object> value) {
Handle<Object> holder = Utils::OpenHandle(*info.Holder()); Handle<Object> holder = Utils::OpenHandle(*info.Holder());
Handle<Object> receiver = Utils::OpenHandle(*info.This()); Handle<Object> receiver = Utils::OpenHandle(*info.This());
if (*holder == *receiver) return false; if (*holder == *receiver) return false;
@ -155,6 +155,46 @@ bool SetPropertyOnInstanceIfInherited(
} }
//
// Accessors::ArgumentsIterator
//
void Accessors::ArgumentsIteratorGetter(
v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation;
HandleScope scope(isolate);
Object* result = isolate->native_context()->array_values_iterator();
info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
}
void Accessors::ArgumentsIteratorSetter(
v8::Local<v8::Name> name, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate);
Handle<JSObject> object = Utils::OpenHandle(*info.This());
Handle<Object> value = Utils::OpenHandle(*val);
if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
LookupIterator it(object, Utils::OpenHandle(*name));
CHECK_EQ(LookupIterator::ACCESSOR, it.state());
DCHECK(it.HolderIsReceiverOrHiddenPrototype());
Object::SetDataProperty(&it, value);
}
Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
Isolate* isolate, PropertyAttributes attributes) {
Handle<Name> name(isolate->native_context()->iterator_symbol(), isolate);
return MakeAccessor(isolate, name, &ArgumentsIteratorGetter,
&ArgumentsIteratorSetter, attributes);
}
// //
// Accessors::ArrayLength // Accessors::ArrayLength
// //
@ -176,7 +216,7 @@ Handle<Object> Accessors::FlattenNumber(Isolate* isolate,
void Accessors::ArrayLengthGetter( void Accessors::ArrayLengthGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -188,7 +228,7 @@ void Accessors::ArrayLengthGetter(
void Accessors::ArrayLengthSetter( void Accessors::ArrayLengthSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> val, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
@ -222,9 +262,15 @@ void Accessors::ArrayLengthSetter(
return; return;
} }
isolate->ScheduleThrow( Handle<Object> exception;
*isolate->factory()->NewRangeError("invalid_array_length", maybe = isolate->factory()->NewRangeError("invalid_array_length",
HandleVector<Object>(NULL, 0))); HandleVector<Object>(NULL, 0));
if (!maybe.ToHandle(&exception)) {
isolate->OptionalRescheduleException(false);
return;
}
isolate->ScheduleThrow(*exception);
} }
@ -244,7 +290,7 @@ Handle<AccessorInfo> Accessors::ArrayLengthInfo(
// //
void Accessors::StringLengthGetter( void Accessors::StringLengthGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -267,7 +313,7 @@ void Accessors::StringLengthGetter(
void Accessors::StringLengthSetter( void Accessors::StringLengthSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -290,7 +336,7 @@ Handle<AccessorInfo> Accessors::StringLengthInfo(
void Accessors::ScriptColumnOffsetGetter( void Accessors::ScriptColumnOffsetGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -302,7 +348,7 @@ void Accessors::ScriptColumnOffsetGetter(
void Accessors::ScriptColumnOffsetSetter( void Accessors::ScriptColumnOffsetSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -312,7 +358,7 @@ void Accessors::ScriptColumnOffsetSetter(
Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo( Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo(
Isolate* isolate, PropertyAttributes attributes) { Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString( Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("column_offset"))); STATIC_CHAR_VECTOR("column_offset")));
return MakeAccessor(isolate, return MakeAccessor(isolate,
name, name,
&ScriptColumnOffsetGetter, &ScriptColumnOffsetGetter,
@ -327,7 +373,7 @@ Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo(
void Accessors::ScriptIdGetter( void Accessors::ScriptIdGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -339,7 +385,7 @@ void Accessors::ScriptIdGetter(
void Accessors::ScriptIdSetter( void Accessors::ScriptIdSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -348,8 +394,8 @@ void Accessors::ScriptIdSetter(
Handle<AccessorInfo> Accessors::ScriptIdInfo( Handle<AccessorInfo> Accessors::ScriptIdInfo(
Isolate* isolate, PropertyAttributes attributes) { Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString( Handle<String> name(
STATIC_ASCII_VECTOR("id"))); isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("id")));
return MakeAccessor(isolate, return MakeAccessor(isolate,
name, name,
&ScriptIdGetter, &ScriptIdGetter,
@ -364,7 +410,7 @@ Handle<AccessorInfo> Accessors::ScriptIdInfo(
void Accessors::ScriptNameGetter( void Accessors::ScriptNameGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -376,7 +422,7 @@ void Accessors::ScriptNameGetter(
void Accessors::ScriptNameSetter( void Accessors::ScriptNameSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -399,7 +445,7 @@ Handle<AccessorInfo> Accessors::ScriptNameInfo(
void Accessors::ScriptSourceGetter( void Accessors::ScriptSourceGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -411,7 +457,7 @@ void Accessors::ScriptSourceGetter(
void Accessors::ScriptSourceSetter( void Accessors::ScriptSourceSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -434,7 +480,7 @@ Handle<AccessorInfo> Accessors::ScriptSourceInfo(
void Accessors::ScriptLineOffsetGetter( void Accessors::ScriptLineOffsetGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -446,7 +492,7 @@ void Accessors::ScriptLineOffsetGetter(
void Accessors::ScriptLineOffsetSetter( void Accessors::ScriptLineOffsetSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -456,7 +502,7 @@ void Accessors::ScriptLineOffsetSetter(
Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo( Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo(
Isolate* isolate, PropertyAttributes attributes) { Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString( Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("line_offset"))); STATIC_CHAR_VECTOR("line_offset")));
return MakeAccessor(isolate, return MakeAccessor(isolate,
name, name,
&ScriptLineOffsetGetter, &ScriptLineOffsetGetter,
@ -471,7 +517,7 @@ Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo(
void Accessors::ScriptTypeGetter( void Accessors::ScriptTypeGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -483,7 +529,7 @@ void Accessors::ScriptTypeGetter(
void Accessors::ScriptTypeSetter( void Accessors::ScriptTypeSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -492,8 +538,8 @@ void Accessors::ScriptTypeSetter(
Handle<AccessorInfo> Accessors::ScriptTypeInfo( Handle<AccessorInfo> Accessors::ScriptTypeInfo(
Isolate* isolate, PropertyAttributes attributes) { Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString( Handle<String> name(
STATIC_ASCII_VECTOR("type"))); isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("type")));
return MakeAccessor(isolate, return MakeAccessor(isolate,
name, name,
&ScriptTypeGetter, &ScriptTypeGetter,
@ -508,7 +554,7 @@ Handle<AccessorInfo> Accessors::ScriptTypeInfo(
void Accessors::ScriptCompilationTypeGetter( void Accessors::ScriptCompilationTypeGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -521,7 +567,7 @@ void Accessors::ScriptCompilationTypeGetter(
void Accessors::ScriptCompilationTypeSetter( void Accessors::ScriptCompilationTypeSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -531,7 +577,7 @@ void Accessors::ScriptCompilationTypeSetter(
Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo( Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
Isolate* isolate, PropertyAttributes attributes) { Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString( Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("compilation_type"))); STATIC_CHAR_VECTOR("compilation_type")));
return MakeAccessor(isolate, return MakeAccessor(isolate,
name, name,
&ScriptCompilationTypeGetter, &ScriptCompilationTypeGetter,
@ -546,7 +592,7 @@ Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
void Accessors::ScriptLineEndsGetter( void Accessors::ScriptLineEndsGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate); HandleScope scope(isolate);
@ -566,7 +612,7 @@ void Accessors::ScriptLineEndsGetter(
void Accessors::ScriptLineEndsSetter( void Accessors::ScriptLineEndsSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -576,7 +622,7 @@ void Accessors::ScriptLineEndsSetter(
Handle<AccessorInfo> Accessors::ScriptLineEndsInfo( Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
Isolate* isolate, PropertyAttributes attributes) { Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString( Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("line_ends"))); STATIC_CHAR_VECTOR("line_ends")));
return MakeAccessor(isolate, return MakeAccessor(isolate,
name, name,
&ScriptLineEndsGetter, &ScriptLineEndsGetter,
@ -591,7 +637,7 @@ Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
void Accessors::ScriptSourceUrlGetter( void Accessors::ScriptSourceUrlGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -603,7 +649,7 @@ void Accessors::ScriptSourceUrlGetter(
void Accessors::ScriptSourceUrlSetter( void Accessors::ScriptSourceUrlSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -626,7 +672,7 @@ Handle<AccessorInfo> Accessors::ScriptSourceUrlInfo(
void Accessors::ScriptSourceMappingUrlGetter( void Accessors::ScriptSourceMappingUrlGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -639,7 +685,7 @@ void Accessors::ScriptSourceMappingUrlGetter(
void Accessors::ScriptSourceMappingUrlSetter( void Accessors::ScriptSourceMappingUrlSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -662,7 +708,7 @@ Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
void Accessors::ScriptContextDataGetter( void Accessors::ScriptContextDataGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
DisallowHeapAllocation no_allocation; DisallowHeapAllocation no_allocation;
@ -674,7 +720,7 @@ void Accessors::ScriptContextDataGetter(
void Accessors::ScriptContextDataSetter( void Accessors::ScriptContextDataSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -684,7 +730,7 @@ void Accessors::ScriptContextDataSetter(
Handle<AccessorInfo> Accessors::ScriptContextDataInfo( Handle<AccessorInfo> Accessors::ScriptContextDataInfo(
Isolate* isolate, PropertyAttributes attributes) { Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString( Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("context_data"))); STATIC_CHAR_VECTOR("context_data")));
return MakeAccessor(isolate, return MakeAccessor(isolate,
name, name,
&ScriptContextDataGetter, &ScriptContextDataGetter,
@ -699,7 +745,7 @@ Handle<AccessorInfo> Accessors::ScriptContextDataInfo(
void Accessors::ScriptEvalFromScriptGetter( void Accessors::ScriptEvalFromScriptGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate); HandleScope scope(isolate);
@ -721,7 +767,7 @@ void Accessors::ScriptEvalFromScriptGetter(
void Accessors::ScriptEvalFromScriptSetter( void Accessors::ScriptEvalFromScriptSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -731,7 +777,7 @@ void Accessors::ScriptEvalFromScriptSetter(
Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo( Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo(
Isolate* isolate, PropertyAttributes attributes) { Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString( Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("eval_from_script"))); STATIC_CHAR_VECTOR("eval_from_script")));
return MakeAccessor(isolate, return MakeAccessor(isolate,
name, name,
&ScriptEvalFromScriptGetter, &ScriptEvalFromScriptGetter,
@ -746,7 +792,7 @@ Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo(
void Accessors::ScriptEvalFromScriptPositionGetter( void Accessors::ScriptEvalFromScriptPositionGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate); HandleScope scope(isolate);
@ -767,7 +813,7 @@ void Accessors::ScriptEvalFromScriptPositionGetter(
void Accessors::ScriptEvalFromScriptPositionSetter( void Accessors::ScriptEvalFromScriptPositionSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -777,7 +823,7 @@ void Accessors::ScriptEvalFromScriptPositionSetter(
Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo( Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo(
Isolate* isolate, PropertyAttributes attributes) { Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString( Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("eval_from_script_position"))); STATIC_CHAR_VECTOR("eval_from_script_position")));
return MakeAccessor(isolate, return MakeAccessor(isolate,
name, name,
&ScriptEvalFromScriptPositionGetter, &ScriptEvalFromScriptPositionGetter,
@ -792,7 +838,7 @@ Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo(
void Accessors::ScriptEvalFromFunctionNameGetter( void Accessors::ScriptEvalFromFunctionNameGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate); HandleScope scope(isolate);
@ -813,7 +859,7 @@ void Accessors::ScriptEvalFromFunctionNameGetter(
void Accessors::ScriptEvalFromFunctionNameSetter( void Accessors::ScriptEvalFromFunctionNameSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> value, v8::Local<v8::Value> value,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
UNREACHABLE(); UNREACHABLE();
@ -823,7 +869,7 @@ void Accessors::ScriptEvalFromFunctionNameSetter(
Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo( Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo(
Isolate* isolate, PropertyAttributes attributes) { Isolate* isolate, PropertyAttributes attributes) {
Handle<String> name(isolate->factory()->InternalizeOneByteString( Handle<String> name(isolate->factory()->InternalizeOneByteString(
STATIC_ASCII_VECTOR("eval_from_function_name"))); STATIC_CHAR_VECTOR("eval_from_function_name")));
return MakeAccessor(isolate, return MakeAccessor(isolate,
name, name,
&ScriptEvalFromFunctionNameGetter, &ScriptEvalFromFunctionNameGetter,
@ -884,7 +930,7 @@ Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
void Accessors::FunctionPrototypeGetter( void Accessors::FunctionPrototypeGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate); HandleScope scope(isolate);
@ -896,7 +942,7 @@ void Accessors::FunctionPrototypeGetter(
void Accessors::FunctionPrototypeSetter( void Accessors::FunctionPrototypeSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> val, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
@ -927,7 +973,7 @@ Handle<AccessorInfo> Accessors::FunctionPrototypeInfo(
void Accessors::FunctionLengthGetter( void Accessors::FunctionLengthGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate); HandleScope scope(isolate);
@ -953,7 +999,7 @@ void Accessors::FunctionLengthGetter(
void Accessors::FunctionLengthSetter( void Accessors::FunctionLengthSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> val, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
// Function length is non writable, non configurable. // Function length is non writable, non configurable.
@ -977,7 +1023,7 @@ Handle<AccessorInfo> Accessors::FunctionLengthInfo(
void Accessors::FunctionNameGetter( void Accessors::FunctionNameGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate); HandleScope scope(isolate);
@ -989,7 +1035,7 @@ void Accessors::FunctionNameGetter(
void Accessors::FunctionNameSetter( void Accessors::FunctionNameSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> val, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
// Function name is non writable, non configurable. // Function name is non writable, non configurable.
@ -1114,7 +1160,7 @@ Handle<Object> Accessors::FunctionGetArguments(Handle<JSFunction> function) {
void Accessors::FunctionArgumentsGetter( void Accessors::FunctionArgumentsGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate); HandleScope scope(isolate);
@ -1126,7 +1172,7 @@ void Accessors::FunctionArgumentsGetter(
void Accessors::FunctionArgumentsSetter( void Accessors::FunctionArgumentsSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> val, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
// Function arguments is non writable, non configurable. // Function arguments is non writable, non configurable.
@ -1257,7 +1303,7 @@ MaybeHandle<JSFunction> FindCaller(Isolate* isolate,
void Accessors::FunctionCallerGetter( void Accessors::FunctionCallerGetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
const v8::PropertyCallbackInfo<v8::Value>& info) { const v8::PropertyCallbackInfo<v8::Value>& info) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate()); i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
HandleScope scope(isolate); HandleScope scope(isolate);
@ -1277,7 +1323,7 @@ void Accessors::FunctionCallerGetter(
void Accessors::FunctionCallerSetter( void Accessors::FunctionCallerSetter(
v8::Local<v8::String> name, v8::Local<v8::Name> name,
v8::Local<v8::Value> val, v8::Local<v8::Value> val,
const v8::PropertyCallbackInfo<void>& info) { const v8::PropertyCallbackInfo<void>& info) {
// Function caller is non writable, non configurable. // Function caller is non writable, non configurable.
@ -1310,9 +1356,16 @@ static void ModuleGetExport(
Isolate* isolate = instance->GetIsolate(); Isolate* isolate = instance->GetIsolate();
if (value->IsTheHole()) { if (value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property); Handle<String> name = v8::Utils::OpenHandle(*property);
isolate->ScheduleThrow(
*isolate->factory()->NewReferenceError("not_defined", Handle<Object> exception;
HandleVector(&name, 1))); MaybeHandle<Object> maybe = isolate->factory()->NewReferenceError(
"not_defined", HandleVector(&name, 1));
if (!maybe.ToHandle(&exception)) {
isolate->OptionalRescheduleException(false);
return;
}
isolate->ScheduleThrow(*exception);
return; return;
} }
info.GetReturnValue().Set(v8::Utils::ToLocal(Handle<Object>(value, isolate))); info.GetReturnValue().Set(v8::Utils::ToLocal(Handle<Object>(value, isolate)));
@ -1328,12 +1381,18 @@ static void ModuleSetExport(
DCHECK(context->IsModuleContext()); DCHECK(context->IsModuleContext());
int slot = info.Data()->Int32Value(); int slot = info.Data()->Int32Value();
Object* old_value = context->get(slot); Object* old_value = context->get(slot);
Isolate* isolate = context->GetIsolate();
if (old_value->IsTheHole()) { if (old_value->IsTheHole()) {
Handle<String> name = v8::Utils::OpenHandle(*property); Handle<String> name = v8::Utils::OpenHandle(*property);
Isolate* isolate = instance->GetIsolate(); Handle<Object> exception;
isolate->ScheduleThrow( MaybeHandle<Object> maybe = isolate->factory()->NewReferenceError(
*isolate->factory()->NewReferenceError("not_defined", "not_defined", HandleVector(&name, 1));
HandleVector(&name, 1))); if (!maybe.ToHandle(&exception)) {
isolate->OptionalRescheduleException(false);
return;
}
isolate->ScheduleThrow(*exception);
return; return;
} }
context->set(slot, *v8::Utils::OpenHandle(*value)); context->set(slot, *v8::Utils::OpenHandle(*value));

View File

@ -14,6 +14,7 @@ namespace internal {
// The list of accessor descriptors. This is a second-order macro // The list of accessor descriptors. This is a second-order macro
// taking a macro to be applied to all accessor descriptor names. // taking a macro to be applied to all accessor descriptor names.
#define ACCESSOR_INFO_LIST(V) \ #define ACCESSOR_INFO_LIST(V) \
V(ArgumentsIterator) \
V(ArrayLength) \ V(ArrayLength) \
V(FunctionArguments) \ V(FunctionArguments) \
V(FunctionCaller) \ V(FunctionCaller) \
@ -43,10 +44,10 @@ class Accessors : public AllStatic {
// Accessor descriptors. // Accessor descriptors.
#define ACCESSOR_INFO_DECLARATION(name) \ #define ACCESSOR_INFO_DECLARATION(name) \
static void name##Getter( \ static void name##Getter( \
v8::Local<v8::String> name, \ v8::Local<v8::Name> name, \
const v8::PropertyCallbackInfo<v8::Value>& info); \ const v8::PropertyCallbackInfo<v8::Value>& info); \
static void name##Setter( \ static void name##Setter( \
v8::Local<v8::String> name, \ v8::Local<v8::Name> name, \
v8::Local<v8::Value> value, \ v8::Local<v8::Value> value, \
const v8::PropertyCallbackInfo<void>& info); \ const v8::PropertyCallbackInfo<void>& info); \
static Handle<AccessorInfo> name##Info( \ static Handle<AccessorInfo> name##Info( \
@ -83,9 +84,9 @@ class Accessors : public AllStatic {
static Handle<AccessorInfo> MakeAccessor( static Handle<AccessorInfo> MakeAccessor(
Isolate* isolate, Isolate* isolate,
Handle<String> name, Handle<Name> name,
AccessorGetterCallback getter, AccessorNameGetterCallback getter,
AccessorSetterCallback setter, AccessorNameSetterCallback setter,
PropertyAttributes attributes); PropertyAttributes attributes);
static Handle<ExecutableAccessorInfo> CloneAccessor( static Handle<ExecutableAccessorInfo> CloneAccessor(

View File

@ -5,6 +5,7 @@
#include "src/allocation.h" #include "src/allocation.h"
#include <stdlib.h> // For free, malloc. #include <stdlib.h> // For free, malloc.
#include "src/base/bits.h"
#include "src/base/logging.h" #include "src/base/logging.h"
#include "src/base/platform/platform.h" #include "src/base/platform/platform.h"
#include "src/utils.h" #include "src/utils.h"
@ -83,7 +84,8 @@ char* StrNDup(const char* str, int n) {
void* AlignedAlloc(size_t size, size_t alignment) { void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*)); // NOLINT DCHECK_LE(V8_ALIGNOF(void*), alignment);
DCHECK(base::bits::IsPowerOfTwo32(alignment));
void* ptr; void* ptr;
#if V8_OS_WIN #if V8_OS_WIN
ptr = _aligned_malloc(size, alignment); ptr = _aligned_malloc(size, alignment);

762
deps/v8/src/api.cc vendored

File diff suppressed because it is too large Load Diff

8
deps/v8/src/api.h vendored
View File

@ -158,6 +158,7 @@ class RegisteredExtension {
V(Float32Array, JSTypedArray) \ V(Float32Array, JSTypedArray) \
V(Float64Array, JSTypedArray) \ V(Float64Array, JSTypedArray) \
V(DataView, JSDataView) \ V(DataView, JSDataView) \
V(Name, Name) \
V(String, String) \ V(String, String) \
V(Symbol, Symbol) \ V(Symbol, Symbol) \
V(Script, JSFunction) \ V(Script, JSFunction) \
@ -189,6 +190,8 @@ class Utils {
v8::internal::Handle<v8::internal::Object> obj); v8::internal::Handle<v8::internal::Object> obj);
static inline Local<Function> ToLocal( static inline Local<Function> ToLocal(
v8::internal::Handle<v8::internal::JSFunction> obj); v8::internal::Handle<v8::internal::JSFunction> obj);
static inline Local<Name> ToLocal(
v8::internal::Handle<v8::internal::Name> obj);
static inline Local<String> ToLocal( static inline Local<String> ToLocal(
v8::internal::Handle<v8::internal::String> obj); v8::internal::Handle<v8::internal::String> obj);
static inline Local<Symbol> ToLocal( static inline Local<Symbol> ToLocal(
@ -333,6 +336,7 @@ inline v8::Local<T> ToApiHandle(
MAKE_TO_LOCAL(ToLocal, Context, Context) MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value) MAKE_TO_LOCAL(ToLocal, Object, Value)
MAKE_TO_LOCAL(ToLocal, JSFunction, Function) MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
MAKE_TO_LOCAL(ToLocal, Name, Name)
MAKE_TO_LOCAL(ToLocal, String, String) MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, Symbol, Symbol) MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp) MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
@ -671,9 +675,9 @@ void HandleScopeImplementer::DeleteExtensions(internal::Object** prev_limit) {
// Interceptor functions called from generated inline caches to notify // Interceptor functions called from generated inline caches to notify
// CPU profiler that external callbacks are invoked. // CPU profiler that external callbacks are invoked.
void InvokeAccessorGetterCallback( void InvokeAccessorGetterCallback(
v8::Local<v8::String> property, v8::Local<v8::Name> property,
const v8::PropertyCallbackInfo<v8::Value>& info, const v8::PropertyCallbackInfo<v8::Value>& info,
v8::AccessorGetterCallback getter); v8::AccessorNameGetterCallback getter);
void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info, void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
v8::FunctionCallback callback); v8::FunctionCallback callback);

View File

@ -72,7 +72,7 @@ function InstantiateFunction(data, name) {
} }
} }
var fun = %CreateApiFunction(data, prototype); var fun = %CreateApiFunction(data, prototype);
if (name) %FunctionSetName(fun, name); if (IS_STRING(name)) %FunctionSetName(fun, name);
var doNotCache = flags & (1 << kDoNotCacheBit); var doNotCache = flags & (1 << kDoNotCacheBit);
if (!doNotCache) cache[serialNumber] = fun; if (!doNotCache) cache[serialNumber] = fun;
ConfigureTemplateInstance(fun, data); ConfigureTemplateInstance(fun, data);

View File

@ -68,13 +68,13 @@ class Arguments BASE_EMBEDDED {
// They are used to generate the Call() functions below // They are used to generate the Call() functions below
// These aren't included in the list as they have duplicate signatures // These aren't included in the list as they have duplicate signatures
// F(NamedPropertyEnumeratorCallback, ...) // F(NamedPropertyEnumeratorCallback, ...)
// F(NamedPropertyGetterCallback, ...)
#define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \ #define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
F(IndexedPropertyEnumeratorCallback, v8::Array) \ F(IndexedPropertyEnumeratorCallback, v8::Array) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \ #define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
F(AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \ F(NamedPropertyGetterCallback, v8::Value, v8::Local<v8::String>) \
F(AccessorNameGetterCallback, v8::Value, v8::Local<v8::Name>) \
F(NamedPropertyQueryCallback, \ F(NamedPropertyQueryCallback, \
v8::Integer, \ v8::Integer, \
v8::Local<v8::String>) \ v8::Local<v8::String>) \
@ -102,9 +102,9 @@ class Arguments BASE_EMBEDDED {
v8::Local<v8::Value>) \ v8::Local<v8::Value>) \
#define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \ #define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
F(AccessorSetterCallback, \ F(AccessorNameSetterCallback, \
void, \ void, \
v8::Local<v8::String>, \ v8::Local<v8::Name>, \
v8::Local<v8::Value>) \ v8::Local<v8::Value>) \

View File

@ -70,6 +70,12 @@ int DwVfpRegister::NumAllocatableRegisters() {
} }
// static
int DwVfpRegister::NumAllocatableAliasedRegisters() {
return LowDwVfpRegister::kMaxNumLowRegisters - kNumReservedRegisters;
}
int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) { int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
DCHECK(!reg.is(kDoubleRegZero)); DCHECK(!reg.is(kDoubleRegZero));
DCHECK(!reg.is(kScratchDoubleReg)); DCHECK(!reg.is(kScratchDoubleReg));
@ -428,31 +434,53 @@ Address Assembler::target_address_from_return_address(Address pc) {
// movt ip, #... @ call address high 16 // movt ip, #... @ call address high 16
// blx ip // blx ip
// @ return address // @ return address
// Or pre-V7 or cases that need frequent patching, the address is in the // For V6 when the constant pool is unavailable, it is:
// mov ip, #... @ call address low 8
// orr ip, ip, #... @ call address 2nd 8
// orr ip, ip, #... @ call address 3rd 8
// orr ip, ip, #... @ call address high 8
// blx ip
// @ return address
// In cases that need frequent patching, the address is in the
// constant pool. It could be a small constant pool load: // constant pool. It could be a small constant pool load:
// ldr ip, [pc / pp, #...] @ call address // ldr ip, [pc / pp, #...] @ call address
// blx ip // blx ip
// @ return address // @ return address
// Or an extended constant pool load: // Or an extended constant pool load (ARMv7):
// movw ip, #... // movw ip, #...
// movt ip, #... // movt ip, #...
// ldr ip, [pc, ip] @ call address // ldr ip, [pc, ip] @ call address
// blx ip // blx ip
// @ return address // @ return address
// Or an extended constant pool load (ARMv6):
// mov ip, #...
// orr ip, ip, #...
// orr ip, ip, #...
// orr ip, ip, #...
// ldr ip, [pc, ip] @ call address
// blx ip
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize; Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate)); Instr candidate_instr(Memory::int32_at(candidate));
if (IsLdrPcImmediateOffset(candidate_instr) | if (IsLdrPcImmediateOffset(candidate_instr) |
IsLdrPpImmediateOffset(candidate_instr)) { IsLdrPpImmediateOffset(candidate_instr)) {
return candidate; return candidate;
} else if (IsLdrPpRegOffset(candidate_instr)) { } else {
candidate = pc - 4 * Assembler::kInstrSize; if (IsLdrPpRegOffset(candidate_instr)) {
candidate -= Assembler::kInstrSize;
}
if (CpuFeatures::IsSupported(ARMv7)) {
candidate -= 1 * Assembler::kInstrSize;
DCHECK(IsMovW(Memory::int32_at(candidate)) && DCHECK(IsMovW(Memory::int32_at(candidate)) &&
IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize))); IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
return candidate;
} else { } else {
candidate = pc - 3 * Assembler::kInstrSize; candidate -= 3 * Assembler::kInstrSize;
DCHECK(IsMovW(Memory::int32_at(candidate)) && DCHECK(
IsMovT(Memory::int32_at(candidate + kInstrSize))); IsMovImmed(Memory::int32_at(candidate)) &&
IsOrrImmed(Memory::int32_at(candidate + Assembler::kInstrSize)) &&
IsOrrImmed(Memory::int32_at(candidate + 2 * Assembler::kInstrSize)) &&
IsOrrImmed(Memory::int32_at(candidate + 3 * Assembler::kInstrSize)));
}
return candidate; return candidate;
} }
} }
@ -469,15 +497,29 @@ Address Assembler::return_address_from_call_start(Address pc) {
// Load from constant pool, small section. // Load from constant pool, small section.
return pc + kInstrSize * 2; return pc + kInstrSize * 2;
} else { } else {
if (CpuFeatures::IsSupported(ARMv7)) {
DCHECK(IsMovW(Memory::int32_at(pc))); DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize))); DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
if (IsLdrPpRegOffset(Memory::int32_at(pc + kInstrSize))) { if (IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))) {
// Load from constant pool, extended section. // Load from constant pool, extended section.
return pc + kInstrSize * 4; return pc + kInstrSize * 4;
} else { } else {
// A movw / movt load immediate. // A movw / movt load immediate.
return pc + kInstrSize * 3; return pc + kInstrSize * 3;
} }
} else {
DCHECK(IsMovImmed(Memory::int32_at(pc)));
DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)));
DCHECK(IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)));
DCHECK(IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize))) {
// Load from constant pool, extended section.
return pc + kInstrSize * 6;
} else {
// A mov / orr load immediate.
return pc + kInstrSize * 5;
}
}
} }
} }
@ -493,10 +535,17 @@ void Assembler::deserialization_set_special_target_at(
bool Assembler::is_constant_pool_load(Address pc) { bool Assembler::is_constant_pool_load(Address pc) {
if (CpuFeatures::IsSupported(ARMv7)) {
return !Assembler::IsMovW(Memory::int32_at(pc)) || return !Assembler::IsMovW(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool && (FLAG_enable_ool_constant_pool &&
Assembler::IsLdrPpRegOffset( Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 2 * Assembler::kInstrSize))); Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
} else {
return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
(FLAG_enable_ool_constant_pool &&
Assembler::IsLdrPpRegOffset(
Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
}
} }
@ -505,10 +554,22 @@ Address Assembler::constant_pool_entry_address(
if (FLAG_enable_ool_constant_pool) { if (FLAG_enable_ool_constant_pool) {
DCHECK(constant_pool != NULL); DCHECK(constant_pool != NULL);
int cp_offset; int cp_offset;
if (IsMovW(Memory::int32_at(pc))) { if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)) &&
IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize)));
// This is an extended constant pool lookup (ARMv6).
Instr mov_instr = instr_at(pc);
Instr orr_instr_1 = instr_at(pc + kInstrSize);
Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
cp_offset = DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3);
} else if (IsMovW(Memory::int32_at(pc))) {
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) && DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))); IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
// This is an extended constant pool lookup. // This is an extended constant pool lookup (ARMv7).
Instruction* movw_instr = Instruction::At(pc); Instruction* movw_instr = Instruction::At(pc);
Instruction* movt_instr = Instruction::At(pc + kInstrSize); Instruction* movt_instr = Instruction::At(pc + kInstrSize);
cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) | cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
@ -532,8 +593,8 @@ Address Assembler::target_address_at(Address pc,
if (is_constant_pool_load(pc)) { if (is_constant_pool_load(pc)) {
// This is a constant pool lookup. Return the value in the constant pool. // This is a constant pool lookup. Return the value in the constant pool.
return Memory::Address_at(constant_pool_entry_address(pc, constant_pool)); return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
} else { } else if (CpuFeatures::IsSupported(ARMv7)) {
// This is an movw_movt immediate load. Return the immediate. // This is an movw / movt immediate load. Return the immediate.
DCHECK(IsMovW(Memory::int32_at(pc)) && DCHECK(IsMovW(Memory::int32_at(pc)) &&
IsMovT(Memory::int32_at(pc + kInstrSize))); IsMovT(Memory::int32_at(pc + kInstrSize)));
Instruction* movw_instr = Instruction::At(pc); Instruction* movw_instr = Instruction::At(pc);
@ -541,6 +602,20 @@ Address Assembler::target_address_at(Address pc,
return reinterpret_cast<Address>( return reinterpret_cast<Address>(
(movt_instr->ImmedMovwMovtValue() << 16) | (movt_instr->ImmedMovwMovtValue() << 16) |
movw_instr->ImmedMovwMovtValue()); movw_instr->ImmedMovwMovtValue());
} else {
// This is an mov / orr immediate load. Return the immediate.
DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
Instr mov_instr = instr_at(pc);
Instr orr_instr_1 = instr_at(pc + kInstrSize);
Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
Address ret = reinterpret_cast<Address>(
DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3));
return ret;
} }
} }
@ -560,9 +635,9 @@ void Assembler::set_target_address_at(Address pc,
// ldr ip, [pp, #...] // ldr ip, [pp, #...]
// since the instruction accessing this address in the constant pool remains // since the instruction accessing this address in the constant pool remains
// unchanged. // unchanged.
} else { } else if (CpuFeatures::IsSupported(ARMv7)) {
// This is an movw_movt immediate load. Patch the immediate embedded in the // This is an movw / movt immediate load. Patch the immediate embedded in
// instructions. // the instructions.
DCHECK(IsMovW(Memory::int32_at(pc))); DCHECK(IsMovW(Memory::int32_at(pc)));
DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize))); DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc); uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
@ -574,6 +649,26 @@ void Assembler::set_target_address_at(Address pc,
if (icache_flush_mode != SKIP_ICACHE_FLUSH) { if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
CpuFeatures::FlushICache(pc, 2 * kInstrSize); CpuFeatures::FlushICache(pc, 2 * kInstrSize);
} }
} else {
// This is an mov / orr immediate load. Patch the immediate embedded in
// the instructions.
DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
uint32_t immediate = reinterpret_cast<uint32_t>(target);
instr_ptr[0] = PatchShiftImm(instr_ptr[0], immediate & kImm8Mask);
instr_ptr[1] = PatchShiftImm(instr_ptr[1], immediate & (kImm8Mask << 8));
instr_ptr[2] = PatchShiftImm(instr_ptr[2], immediate & (kImm8Mask << 16));
instr_ptr[3] = PatchShiftImm(instr_ptr[3], immediate & (kImm8Mask << 24));
DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
CpuFeatures::FlushICache(pc, 4 * kInstrSize);
}
} }
} }

View File

@ -39,6 +39,7 @@
#if V8_TARGET_ARCH_ARM #if V8_TARGET_ARCH_ARM
#include "src/arm/assembler-arm-inl.h" #include "src/arm/assembler-arm-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h" #include "src/base/cpu.h"
#include "src/macro-assembler.h" #include "src/macro-assembler.h"
#include "src/serialize.h" #include "src/serialize.h"
@ -435,6 +436,10 @@ const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwPattern = 0x30 * B20; const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20; const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21; const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kMovImmedMask = 0x7f * B21;
const Instr kMovImmedPattern = 0x1d * B21;
const Instr kOrrImmedMask = 0x7f * B21;
const Instr kOrrImmedPattern = 0x1c * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12; const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20; const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21; const Instr kCmpCmnFlip = B21;
@ -494,7 +499,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) { void Assembler::Align(int m) {
DCHECK(m >= 4 && IsPowerOf2(m)); DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
while ((pc_offset() & (m - 1)) != 0) { while ((pc_offset() & (m - 1)) != 0) {
nop(); nop();
} }
@ -1052,9 +1057,6 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const {
static bool use_mov_immediate_load(const Operand& x, static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) { const Assembler* assembler) {
if (assembler != NULL && !assembler->is_constant_pool_available()) { if (assembler != NULL && !assembler->is_constant_pool_available()) {
// If there is no constant pool available, we must use an mov immediate.
// TODO(rmcilroy): enable ARMv6 support.
DCHECK(CpuFeatures::IsSupported(ARMv7));
return true; return true;
} else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
(assembler == NULL || !assembler->predictable_code_size())) { (assembler == NULL || !assembler->predictable_code_size())) {
@ -1081,11 +1083,14 @@ int Operand::instructions_required(const Assembler* assembler,
// for the constant pool or immediate load // for the constant pool or immediate load
int instructions; int instructions;
if (use_mov_immediate_load(*this, assembler)) { if (use_mov_immediate_load(*this, assembler)) {
instructions = 2; // A movw, movt immediate load. // A movw / movt or mov / orr immediate load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
} else if (assembler != NULL && assembler->use_extended_constant_pool()) { } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
instructions = 3; // An extended constant pool load. // An extended constant pool load.
instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
} else { } else {
instructions = 1; // A small constant pool load. // A small constant pool load.
instructions = 1;
} }
if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set if ((instr & ~kCondMask) != 13 * B21) { // mov, S not set
@ -1107,21 +1112,27 @@ void Assembler::move_32_bit_immediate(Register rd,
const Operand& x, const Operand& x,
Condition cond) { Condition cond) {
RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL); RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
if (x.must_output_reloc_info(this)) { if (x.must_output_reloc_info(this)) {
RecordRelocInfo(rinfo); RecordRelocInfo(rinfo);
} }
if (use_mov_immediate_load(x, this)) { if (use_mov_immediate_load(x, this)) {
Register target = rd.code() == pc.code() ? ip : rd; Register target = rd.code() == pc.code() ? ip : rd;
// TODO(rmcilroy): add ARMv6 support for immediate loads. if (CpuFeatures::IsSupported(ARMv7)) {
DCHECK(CpuFeatures::IsSupported(ARMv7)); if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
if (!FLAG_enable_ool_constant_pool &&
x.must_output_reloc_info(this)) {
// Make sure the movw/movt doesn't get separated. // Make sure the movw/movt doesn't get separated.
BlockConstPoolFor(2); BlockConstPoolFor(2);
} }
movw(target, static_cast<uint32_t>(x.imm32_ & 0xffff), cond); movw(target, imm32 & 0xffff, cond);
movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond); movt(target, imm32 >> 16, cond);
} else {
DCHECK(FLAG_enable_ool_constant_pool);
mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
}
if (target.code() != rd.code()) { if (target.code() != rd.code()) {
mov(rd, target, LeaveCC, cond); mov(rd, target, LeaveCC, cond);
} }
@ -1132,8 +1143,15 @@ void Assembler::move_32_bit_immediate(Register rd,
DCHECK(FLAG_enable_ool_constant_pool); DCHECK(FLAG_enable_ool_constant_pool);
Register target = rd.code() == pc.code() ? ip : rd; Register target = rd.code() == pc.code() ? ip : rd;
// Emit instructions to load constant pool offset. // Emit instructions to load constant pool offset.
if (CpuFeatures::IsSupported(ARMv7)) {
movw(target, 0, cond); movw(target, 0, cond);
movt(target, 0, cond); movt(target, 0, cond);
} else {
mov(target, Operand(0), LeaveCC, cond);
orr(target, target, Operand(0), LeaveCC, cond);
orr(target, target, Operand(0), LeaveCC, cond);
orr(target, target, Operand(0), LeaveCC, cond);
}
// Load from constant pool at offset. // Load from constant pool at offset.
ldr(rd, MemOperand(pp, target), cond); ldr(rd, MemOperand(pp, target), cond);
} else { } else {
@ -3147,6 +3165,23 @@ Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
} }
int Assembler::DecodeShiftImm(Instr instr) {
int rotate = Instruction::RotateValue(instr) * 2;
int immed8 = Instruction::Immed8Value(instr);
return (immed8 >> rotate) | (immed8 << (32 - rotate));
}
Instr Assembler::PatchShiftImm(Instr instr, int immed) {
uint32_t rotate_imm = 0;
uint32_t immed_8 = 0;
bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
DCHECK(immed_fits);
USE(immed_fits);
return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
}
bool Assembler::IsNop(Instr instr, int type) { bool Assembler::IsNop(Instr instr, int type) {
DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop. DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
// Check for mov rx, rx where x = type. // Check for mov rx, rx where x = type.
@ -3154,6 +3189,16 @@ bool Assembler::IsNop(Instr instr, int type) {
} }
bool Assembler::IsMovImmed(Instr instr) {
return (instr & kMovImmedMask) == kMovImmedPattern;
}
bool Assembler::IsOrrImmed(Instr instr) {
return (instr & kOrrImmedMask) == kOrrImmedPattern;
}
// static // static
bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
uint32_t dummy1; uint32_t dummy1;
@ -3735,17 +3780,46 @@ void ConstantPoolBuilder::Populate(Assembler* assm,
// Patch vldr/ldr instruction with correct offset. // Patch vldr/ldr instruction with correct offset.
Instr instr = assm->instr_at(rinfo.pc()); Instr instr = assm->instr_at(rinfo.pc());
if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) { if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
if (CpuFeatures::IsSupported(ARMv7)) {
// Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]. // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize); Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
DCHECK((Assembler::IsMovW(instr) && DCHECK((Assembler::IsMovW(instr) &&
Instruction::ImmedMovwMovtValue(instr) == 0)); Instruction::ImmedMovwMovtValue(instr) == 0));
DCHECK((Assembler::IsMovT(next_instr) && DCHECK((Assembler::IsMovT(next_instr) &&
Instruction::ImmedMovwMovtValue(next_instr) == 0)); Instruction::ImmedMovwMovtValue(next_instr) == 0));
assm->instr_at_put(rinfo.pc(), assm->instr_at_put(
Assembler::PatchMovwImmediate(instr, offset & 0xffff)); rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
assm->instr_at_put( assm->instr_at_put(
rinfo.pc() + Assembler::kInstrSize, rinfo.pc() + Assembler::kInstrSize,
Assembler::PatchMovwImmediate(next_instr, offset >> 16)); Assembler::PatchMovwImmediate(next_instr, offset >> 16));
} else {
// Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0].
Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
DCHECK((Assembler::IsMovImmed(instr) &&
Instruction::Immed8Value(instr) == 0));
DCHECK((Assembler::IsOrrImmed(instr_2) &&
Instruction::Immed8Value(instr_2) == 0) &&
Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
DCHECK((Assembler::IsOrrImmed(instr_3) &&
Instruction::Immed8Value(instr_3) == 0) &&
Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
DCHECK((Assembler::IsOrrImmed(instr_4) &&
Instruction::Immed8Value(instr_4) == 0) &&
Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
assm->instr_at_put(
rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
assm->instr_at_put(
rinfo.pc() + Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
assm->instr_at_put(
rinfo.pc() + 2 * Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
assm->instr_at_put(
rinfo.pc() + 3 * Assembler::kInstrSize,
Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
}
} else if (type == ConstantPoolArray::INT64) { } else if (type == ConstantPoolArray::INT64) {
// Instruction to patch must be 'vldr rd, [pp, #0]'. // Instruction to patch must be 'vldr rd, [pp, #0]'.
DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) && DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&

View File

@ -218,6 +218,11 @@ struct DwVfpRegister {
inline static int NumReservedRegisters(); inline static int NumReservedRegisters();
inline static int NumAllocatableRegisters(); inline static int NumAllocatableRegisters();
// TODO(turbofan): This is a temporary work-around required because our
// register allocator does not yet support the aliasing of single/double
// registers on ARM.
inline static int NumAllocatableAliasedRegisters();
inline static int ToAllocationIndex(DwVfpRegister reg); inline static int ToAllocationIndex(DwVfpRegister reg);
static const char* AllocationIndexToString(int index); static const char* AllocationIndexToString(int index);
inline static DwVfpRegister FromAllocationIndex(int index); inline static DwVfpRegister FromAllocationIndex(int index);
@ -1449,12 +1454,16 @@ class Assembler : public AssemblerBase {
static Register GetCmpImmediateRegister(Instr instr); static Register GetCmpImmediateRegister(Instr instr);
static int GetCmpImmediateRawImmediate(Instr instr); static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP); static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
static bool IsMovImmed(Instr instr);
static bool IsOrrImmed(Instr instr);
static bool IsMovT(Instr instr); static bool IsMovT(Instr instr);
static Instr GetMovTPattern(); static Instr GetMovTPattern();
static bool IsMovW(Instr instr); static bool IsMovW(Instr instr);
static Instr GetMovWPattern(); static Instr GetMovWPattern();
static Instr EncodeMovwImmediate(uint32_t immediate); static Instr EncodeMovwImmediate(uint32_t immediate);
static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate); static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
static int DecodeShiftImm(Instr instr);
static Instr PatchShiftImm(Instr instr, int immed);
// Constants in pools are accessed via pc relative addressing, which can // Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point

View File

@ -10,8 +10,7 @@
#include "src/debug.h" #include "src/debug.h"
#include "src/deoptimizer.h" #include "src/deoptimizer.h"
#include "src/full-codegen.h" #include "src/full-codegen.h"
#include "src/runtime.h" #include "src/runtime/runtime.h"
#include "src/stub-cache.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -808,8 +807,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
} }
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) { void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized); CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm); GenerateTailCallToReturnedCode(masm);
} }
@ -1422,13 +1421,7 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp + __ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
kPointerSize))); kPointerSize)));
if (FLAG_enable_ool_constant_pool) { __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR);
__ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
__ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
} else {
__ mov(sp, fp);;
__ ldm(ia_w, sp, fp.bit() | lr.bit());
}
__ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1)); __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
__ add(sp, sp, Operand(kPointerSize)); // adjust for receiver __ add(sp, sp, Operand(kPointerSize)); // adjust for receiver
} }

File diff suppressed because it is too large Load Diff

View File

@ -5,8 +5,6 @@
#ifndef V8_ARM_CODE_STUBS_ARM_H_ #ifndef V8_ARM_CODE_STUBS_ARM_H_
#define V8_ARM_CODE_STUBS_ARM_H_ #define V8_ARM_CODE_STUBS_ARM_H_
#include "src/ic-inl.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -14,24 +12,6 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code); void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
: PlatformCodeStub(isolate), save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() const { return StoreBufferOverflow; }
int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class StringHelper : public AllStatic { class StringHelper : public AllStatic {
public: public:
// Generate code for copying a large number of characters. This function // Generate code for copying a large number of characters. This function
@ -45,71 +25,24 @@ class StringHelper : public AllStatic {
Register scratch, Register scratch,
String::Encoding encoding); String::Encoding encoding);
// Compares two flat one-byte strings and returns result in r0.
static void GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3, Register scratch4);
// Generate string hash. // Compares two flat one-byte strings for equality and returns result in r0.
static void GenerateHashInit(MacroAssembler* masm, static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register hash, Register left, Register right,
Register character);
static void GenerateHashAddCharacter(MacroAssembler* masm,
Register hash,
Register character);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
class SubStringStub: public PlatformCodeStub {
public:
explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
Major MajorKey() const { return SubString; }
int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public PlatformCodeStub {
public:
explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
// Compares two flat ASCII strings and returns result in r0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
// Compares two flat ASCII strings for equality and returns result
// in r0.
static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Register left,
Register right,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Register scratch3); Register scratch3);
private: private:
virtual Major MajorKey() const { return StringCompare; } static void GenerateOneByteCharsCompareLoop(
virtual int MinorKey() const { return 0; } MacroAssembler* masm, Register left, Register right, Register length,
virtual void Generate(MacroAssembler* masm); Register scratch1, Register scratch2, Label* chars_not_equal);
static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm, DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
Register left,
Register right,
Register length,
Register scratch1,
Register scratch2,
Label* chars_not_equal);
}; };
@ -118,36 +51,36 @@ class StringCompareStub: public PlatformCodeStub {
// so you don't have to set up the frame. // so you don't have to set up the frame.
class WriteInt32ToHeapNumberStub : public PlatformCodeStub { class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
public: public:
WriteInt32ToHeapNumberStub(Isolate* isolate, WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
Register the_int, Register the_heap_number, Register scratch)
Register the_heap_number, : PlatformCodeStub(isolate) {
Register scratch) minor_key_ = IntRegisterBits::encode(the_int.code()) |
: PlatformCodeStub(isolate), HeapNumberRegisterBits::encode(the_heap_number.code()) |
the_int_(the_int), ScratchRegisterBits::encode(scratch.code());
the_heap_number_(the_heap_number), }
scratch_(scratch) { }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
private: private:
Register the_int_; Register the_int() const {
Register the_heap_number_; return Register::from_code(IntRegisterBits::decode(minor_key_));
Register scratch_; }
Register the_heap_number() const {
return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
}
Register scratch() const {
return Register::from_code(ScratchRegisterBits::decode(minor_key_));
}
// Minor key encoding in 16 bits. // Minor key encoding in 16 bits.
class IntRegisterBits: public BitField<int, 0, 4> {}; class IntRegisterBits: public BitField<int, 0, 4> {};
class HeapNumberRegisterBits: public BitField<int, 4, 4> {}; class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
class ScratchRegisterBits: public BitField<int, 8, 4> {}; class ScratchRegisterBits: public BitField<int, 8, 4> {};
Major MajorKey() const { return WriteInt32ToHeapNumber; } DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
int MinorKey() const { DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
// Encode the parameters in a unique 16 bit value.
return IntRegisterBits::encode(the_int_.code())
| HeapNumberRegisterBits::encode(the_heap_number_.code())
| ScratchRegisterBits::encode(scratch_.code());
}
void Generate(MacroAssembler* masm);
}; };
@ -160,16 +93,19 @@ class RecordWriteStub: public PlatformCodeStub {
RememberedSetAction remembered_set_action, RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode) SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate), : PlatformCodeStub(isolate),
object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg. regs_(object, // An input reg.
address, // An input reg. address, // An input reg.
value) { // One scratch reg. value) { // One scratch reg.
minor_key_ = ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(remembered_set_action) |
SaveFPRegsModeBits::encode(fp_mode);
} }
RecordWriteStub(uint32_t key, Isolate* isolate)
: PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
enum Mode { enum Mode {
STORE_BUFFER_ONLY, STORE_BUFFER_ONLY,
INCREMENTAL, INCREMENTAL,
@ -233,6 +169,8 @@ class RecordWriteStub: public PlatformCodeStub {
2 * Assembler::kInstrSize); 2 * Assembler::kInstrSize);
} }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
private: private:
// This is a helper class for freeing up 3 scratch registers. The input is // This is a helper class for freeing up 3 scratch registers. The input is
// two registers that must be preserved and one scratch register provided by // two registers that must be preserved and one scratch register provided by
@ -297,7 +235,9 @@ class RecordWriteStub: public PlatformCodeStub {
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
}; };
void Generate(MacroAssembler* masm); virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
virtual void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode); void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker( void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm, MacroAssembler* masm,
@ -305,33 +245,40 @@ class RecordWriteStub: public PlatformCodeStub {
Mode mode); Mode mode);
void InformIncrementalMarker(MacroAssembler* masm); void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() const { return RecordWrite; }
int MinorKey() const {
return ObjectBits::encode(object_.code()) |
ValueBits::encode(value_.code()) |
AddressBits::encode(address_.code()) |
RememberedSetActionBits::encode(remembered_set_action_) |
SaveFPRegsModeBits::encode(save_fp_regs_mode_);
}
void Activate(Code* code) { void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
} }
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
}
Register value() const {
return Register::from_code(ValueBits::decode(minor_key_));
}
Register address() const {
return Register::from_code(AddressBits::decode(minor_key_));
}
RememberedSetAction remembered_set_action() const {
return RememberedSetActionBits::decode(minor_key_);
}
SaveFPRegsMode save_fp_regs_mode() const {
return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 4> {}; class ObjectBits: public BitField<int, 0, 4> {};
class ValueBits: public BitField<int, 4, 4> {}; class ValueBits: public BitField<int, 4, 4> {};
class AddressBits: public BitField<int, 8, 4> {}; class AddressBits: public BitField<int, 8, 4> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {}; class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {}; class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
Label slow_; Label slow_;
RegisterAllocation regs_; RegisterAllocation regs_;
DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
}; };
@ -343,14 +290,13 @@ class RecordWriteStub: public PlatformCodeStub {
class DirectCEntryStub: public PlatformCodeStub { class DirectCEntryStub: public PlatformCodeStub {
public: public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {} explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target); void GenerateCall(MacroAssembler* masm, Register target);
private: private:
Major MajorKey() const { return DirectCEntry; }
int MinorKey() const { return 0; }
bool NeedsImmovableCode() { return true; } bool NeedsImmovableCode() { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
}; };
@ -359,9 +305,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode) NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
: PlatformCodeStub(isolate), mode_(mode) { } : PlatformCodeStub(isolate) {
minor_key_ = LookupModeBits::encode(mode);
void Generate(MacroAssembler* masm); }
static void GenerateNegativeLookup(MacroAssembler* masm, static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss, Label* miss,
@ -393,29 +339,14 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize + NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize; NameDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() const { return NameDictionaryLookup; } LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
int MinorKey() const { return LookupModeBits::encode(mode_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {}; class LookupModeBits: public BitField<LookupMode, 0, 1> {};
LookupMode mode_; DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
}; };
class PlatformInterfaceDescriptor {
public:
explicit PlatformInterfaceDescriptor(
TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) { }
TargetAddressStorageMode storage_mode() { return storage_mode_; }
private:
TargetAddressStorageMode storage_mode_;
};
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_ARM_CODE_STUBS_ARM_H_ #endif // V8_ARM_CODE_STUBS_ARM_H_

View File

@ -759,16 +759,16 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(ne, call_runtime); __ b(ne, call_runtime);
__ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset)); __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label ascii, done; Label one_byte, done;
__ bind(&check_encoding); __ bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0); STATIC_ASSERT(kTwoByteStringTag == 0);
__ tst(result, Operand(kStringEncodingMask)); __ tst(result, Operand(kStringEncodingMask));
__ b(ne, &ascii); __ b(ne, &one_byte);
// Two-byte string. // Two-byte string.
__ ldrh(result, MemOperand(string, index, LSL, 1)); __ ldrh(result, MemOperand(string, index, LSL, 1));
__ jmp(&done); __ jmp(&done);
__ bind(&ascii); __ bind(&one_byte);
// Ascii string. // One-byte string.
__ ldrb(result, MemOperand(string, index)); __ ldrb(result, MemOperand(string, index));
__ bind(&done); __ bind(&done);
} }

View File

@ -6,7 +6,7 @@
#define V8_ARM_CODEGEN_ARM_H_ #define V8_ARM_CODEGEN_ARM_H_
#include "src/ast.h" #include "src/ast.h"
#include "src/ic-inl.h" #include "src/macro-assembler.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {

View File

@ -564,7 +564,9 @@ class Instruction {
inline int ShiftAmountValue() const { return Bits(11, 7); } inline int ShiftAmountValue() const { return Bits(11, 7); }
// with immediate // with immediate
inline int RotateValue() const { return Bits(11, 8); } inline int RotateValue() const { return Bits(11, 8); }
DECLARE_STATIC_ACCESSOR(RotateValue);
inline int Immed8Value() const { return Bits(7, 0); } inline int Immed8Value() const { return Bits(7, 0); }
DECLARE_STATIC_ACCESSOR(Immed8Value);
inline int Immed4Value() const { return Bits(19, 16); } inline int Immed4Value() const { return Bits(19, 16); }
inline int ImmedMovwMovtValue() const { inline int ImmedMovwMovtValue() const {
return Immed4Value() << 12 | Offset12Value(); } return Immed4Value() << 12 | Offset12Value(); }

View File

@ -176,17 +176,17 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) { void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc). // Calling convention for IC load (from ic-arm.cc).
Register receiver = LoadIC::ReceiverRegister(); Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadIC::NameRegister(); Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0); Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
} }
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) { void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm.cc). // Calling convention for IC store (from ic-arm.cc).
Register receiver = StoreIC::ReceiverRegister(); Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreIC::NameRegister(); Register name = StoreDescriptor::NameRegister();
Register value = StoreIC::ValueRegister(); Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper( Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0); masm, receiver.bit() | name.bit() | value.bit(), 0);
} }
@ -200,9 +200,9 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm.cc). // Calling convention for IC keyed store call (from ic-arm.cc).
Register receiver = KeyedStoreIC::ReceiverRegister(); Register receiver = StoreDescriptor::ReceiverRegister();
Register name = KeyedStoreIC::NameRegister(); Register name = StoreDescriptor::NameRegister();
Register value = KeyedStoreIC::ValueRegister(); Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper( Generate_DebugBreakCallHelper(
masm, receiver.bit() | name.bit() | value.bit(), 0); masm, receiver.bit() | name.bit() | value.bit(), 0);
} }

View File

@ -101,7 +101,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
void Deoptimizer::SetPlatformCompiledStubRegisters( void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler()); ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); intptr_t handler = reinterpret_cast<intptr_t>(xref.address());

View File

@ -6,15 +6,16 @@
#if V8_TARGET_ARCH_ARM #if V8_TARGET_ARCH_ARM
#include "src/code-factory.h"
#include "src/code-stubs.h" #include "src/code-stubs.h"
#include "src/codegen.h" #include "src/codegen.h"
#include "src/compiler.h" #include "src/compiler.h"
#include "src/debug.h" #include "src/debug.h"
#include "src/full-codegen.h" #include "src/full-codegen.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h" #include "src/isolate-inl.h"
#include "src/parser.h" #include "src/parser.h"
#include "src/scopes.h" #include "src/scopes.h"
#include "src/stub-cache.h"
#include "src/arm/code-stubs-arm.h" #include "src/arm/code-stubs-arm.h"
#include "src/arm/macro-assembler-arm.h" #include "src/arm/macro-assembler-arm.h"
@ -346,7 +347,11 @@ void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
} }
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize; static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize;
#else
static const int kProfileCounterResetSequenceLength = 7 * Assembler::kInstrSize;
#endif
void FullCodeGenerator::EmitProfilingCounterReset() { void FullCodeGenerator::EmitProfilingCounterReset() {
@ -361,10 +366,13 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
reset_value = FLAG_interrupt_budget >> 4; reset_value = FLAG_interrupt_budget >> 4;
} }
__ mov(r2, Operand(profiling_counter_)); __ mov(r2, Operand(profiling_counter_));
// The mov instruction above can be either 1, 2 or 3 instructions depending // The mov instruction above can be either 1 to 3 (for ARMv7) or 1 to 5
// upon whether it is an extended constant pool - insert nop to compensate. // instructions (for ARMv6) depending upon whether it is an extended constant
DCHECK(masm_->InstructionsGeneratedSince(&start) <= 3); // pool - insert nop to compensate.
while (masm_->InstructionsGeneratedSince(&start) != 3) { int expected_instr_count =
(kProfileCounterResetSequenceLength / Assembler::kInstrSize) - 2;
DCHECK(masm_->InstructionsGeneratedSince(&start) <= expected_instr_count);
while (masm_->InstructionsGeneratedSince(&start) != expected_instr_count) {
__ nop(); __ nop();
} }
__ mov(r3, Operand(Smi::FromInt(reset_value))); __ mov(r3, Operand(Smi::FromInt(reset_value)));
@ -448,10 +456,12 @@ void FullCodeGenerator::EmitReturnSequence() {
PredictableCodeSizeScope predictable(masm_, -1); PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn(); __ RecordJSReturn();
int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT); int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
{ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
__ add(sp, sp, Operand(sp_delta)); __ add(sp, sp, Operand(sp_delta));
__ Jump(lr); __ Jump(lr);
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
} }
}
#ifdef DEBUG #ifdef DEBUG
// Check that the size of the code used for returning is large enough // Check that the size of the code used for returning is large enough
@ -1045,7 +1055,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback. // Record position before stub call for type feedback.
SetSourcePosition(clause->position()); SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT); Handle<Code> ic =
CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId()); CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
@ -1182,7 +1193,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&fixed_array); __ bind(&fixed_array);
__ Move(r1, FeedbackVector()); __ Move(r1, FeedbackVector());
__ mov(r2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot))); __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
__ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
@ -1324,9 +1335,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure && !pretenure &&
scope()->is_function_scope() && scope()->is_function_scope() &&
info->num_literals() == 0) { info->num_literals() == 0) {
FastNewClosureStub stub(isolate(), FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
info->strict_mode(),
info->is_generator());
__ mov(r2, Operand(info)); __ mov(r2, Operand(info));
__ CallStub(&stub); __ CallStub(&stub);
} else { } else {
@ -1346,6 +1355,25 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
} }
void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
Comment cnmt(masm_, "[ SuperReference ");
__ ldr(LoadDescriptor::ReceiverRegister(),
MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
__ Move(LoadDescriptor::NameRegister(), home_object_symbol);
CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
__ cmp(r0, Operand(isolate()->factory()->undefined_value()));
Label done;
__ b(ne, &done);
__ CallRuntime(Runtime::kThrowNonMethodError, 0);
__ bind(&done);
}
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state, TypeofState typeof_state,
Label* slow) { Label* slow) {
@ -1394,10 +1422,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&fast); __ bind(&fast);
} }
__ ldr(LoadIC::ReceiverRegister(), GlobalObjectOperand()); __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
__ mov(LoadIC::NameRegister(), Operand(proxy->var()->name())); __ mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ mov(LoadIC::SlotRegister(), __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
} }
@ -1483,10 +1511,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) { switch (var->location()) {
case Variable::UNALLOCATED: { case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable"); Comment cmnt(masm_, "[ Global variable");
__ ldr(LoadIC::ReceiverRegister(), GlobalObjectOperand()); __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
__ mov(LoadIC::NameRegister(), Operand(var->name())); __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ mov(LoadIC::SlotRegister(), __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
} }
CallLoadIC(CONTEXTUAL); CallLoadIC(CONTEXTUAL);
@ -1694,9 +1722,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) { if (key->value()->IsInternalizedString()) {
if (property->emit_store()) { if (property->emit_store()) {
VisitForAccumulatorValue(value); VisitForAccumulatorValue(value);
DCHECK(StoreIC::ValueRegister().is(r0)); DCHECK(StoreDescriptor::ValueRegister().is(r0));
__ mov(StoreIC::NameRegister(), Operand(key->value())); __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ ldr(StoreIC::ReceiverRegister(), MemOperand(sp)); __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(key->LiteralFeedbackId()); CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS); PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else { } else {
@ -1857,12 +1885,18 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a (parameter or local) // Left-hand side can only be a property, a global or a (parameter or local)
// slot. // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; enum LhsKind {
VARIABLE,
NAMED_PROPERTY,
KEYED_PROPERTY,
NAMED_SUPER_PROPERTY
};
LhsKind assign_type = VARIABLE; LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty(); Property* property = expr->target()->AsProperty();
if (property != NULL) { if (property != NULL) {
assign_type = (property->key()->IsPropertyName()) assign_type = (property->key()->IsPropertyName())
? NAMED_PROPERTY ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
: NAMED_PROPERTY)
: KEYED_PROPERTY; : KEYED_PROPERTY;
} }
@ -1875,17 +1909,29 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) { if (expr->is_compound()) {
// We need the receiver both on the stack and in the register. // We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
__ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
} else { } else {
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
} }
break; break;
case NAMED_SUPER_PROPERTY:
VisitForStackValue(property->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(property->obj()->AsSuperReference());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch = r1;
__ ldr(scratch, MemOperand(sp, kPointerSize));
__ Push(scratch);
__ Push(result_register());
}
break;
case KEYED_PROPERTY: case KEYED_PROPERTY:
if (expr->is_compound()) { if (expr->is_compound()) {
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
VisitForStackValue(property->key()); VisitForStackValue(property->key());
__ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); __ ldr(LoadDescriptor::ReceiverRegister(),
__ ldr(LoadIC::NameRegister(), MemOperand(sp, 0)); MemOperand(sp, 1 * kPointerSize));
__ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
} else { } else {
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
VisitForStackValue(property->key()); VisitForStackValue(property->key());
@ -1906,6 +1952,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyLoad(property); EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG); PrepareForBailoutForId(property->LoadId(), TOS_REG);
break; break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY: case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property); EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG); PrepareForBailoutForId(property->LoadId(), TOS_REG);
@ -1952,6 +2002,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY: case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr); EmitNamedPropertyAssignment(expr);
break; break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyAssignment(expr);
break;
case KEYED_PROPERTY: case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr); EmitKeyedPropertyAssignment(expr);
break; break;
@ -1966,12 +2019,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
VisitForStackValue(expr->expression()); VisitForStackValue(expr->expression());
switch (expr->yield_kind()) { switch (expr->yield_kind()) {
case Yield::SUSPEND: case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register. // Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false); EmitCreateIteratorResult(false);
__ push(result_register()); __ push(result_register());
// Fall through. // Fall through.
case Yield::INITIAL: { case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume; Label suspend, continuation, post_runtime, resume;
__ jmp(&suspend); __ jmp(&suspend);
@ -2003,7 +2056,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break; break;
} }
case Yield::FINAL: { case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object()); VisitForAccumulatorValue(expr->generator_object());
__ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed))); __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
__ str(r1, FieldMemOperand(result_register(), __ str(r1, FieldMemOperand(result_register(),
@ -2015,7 +2068,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break; break;
} }
case Yield::DELEGATING: { case Yield::kDelegating: {
VisitForStackValue(expr->generator_object()); VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows: // Initial stack layout is as follows:
@ -2024,8 +2077,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume; Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call, l_loop; Label l_next, l_call, l_loop;
Register load_receiver = LoadIC::ReceiverRegister(); Register load_receiver = LoadDescriptor::ReceiverRegister();
Register load_name = LoadIC::NameRegister(); Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined. // Initial send value is undefined.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex); __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
@ -2080,10 +2133,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ ldr(load_receiver, MemOperand(sp, kPointerSize)); __ ldr(load_receiver, MemOperand(sp, kPointerSize));
__ ldr(load_name, MemOperand(sp, 2 * kPointerSize)); __ ldr(load_name, MemOperand(sp, 2 * kPointerSize));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ mov(LoadIC::SlotRegister(), __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot()))); Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
} }
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None()); CallIC(ic, TypeFeedbackId::None());
__ mov(r1, r0); __ mov(r1, r0);
__ str(r1, MemOperand(sp, 2 * kPointerSize)); __ str(r1, MemOperand(sp, 2 * kPointerSize));
@ -2100,7 +2153,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ push(load_receiver); // save result __ push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done" __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ mov(LoadIC::SlotRegister(), __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->DoneFeedbackSlot()))); Operand(Smi::FromInt(expr->DoneFeedbackSlot())));
} }
CallLoadIC(NOT_CONTEXTUAL); // r0=result.done CallLoadIC(NOT_CONTEXTUAL); // r0=result.done
@ -2113,7 +2166,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ pop(load_receiver); // result __ pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value" __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ mov(LoadIC::SlotRegister(), __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->ValueFeedbackSlot()))); Operand(Smi::FromInt(expr->ValueFeedbackSlot())));
} }
CallLoadIC(NOT_CONTEXTUAL); // r0=result.value CallLoadIC(NOT_CONTEXTUAL); // r0=result.value
@ -2290,9 +2343,11 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position()); SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral(); Literal* key = prop->key()->AsLiteral();
__ mov(LoadIC::NameRegister(), Operand(key->value())); DCHECK(!prop->IsSuperAccess());
__ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ mov(LoadIC::SlotRegister(), __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(prop->PropertyFeedbackSlot()))); Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); CallLoadIC(NOT_CONTEXTUAL);
} else { } else {
@ -2301,11 +2356,23 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
} }
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object.
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ Push(key->value());
__ CallRuntime(Runtime::kLoadFromSuper, 3);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position()); SetSourcePosition(prop->position());
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ mov(LoadIC::SlotRegister(), __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(prop->PropertyFeedbackSlot()))); Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
CallIC(ic); CallIC(ic);
} else { } else {
@ -2336,8 +2403,8 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(scratch1, &smi_case); patch_site.EmitJumpIfSmi(scratch1, &smi_case);
__ bind(&stub_call); __ bind(&stub_call);
BinaryOpICStub stub(isolate(), op, mode); Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
__ jmp(&done); __ jmp(&done);
@ -2412,9 +2479,9 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op, Token::Value op,
OverwriteMode mode) { OverwriteMode mode) {
__ pop(r1); __ pop(r1);
BinaryOpICStub stub(isolate(), op, mode); Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
context()->Plug(r0); context()->Plug(r0);
} }
@ -2444,9 +2511,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
case NAMED_PROPERTY: { case NAMED_PROPERTY: {
__ push(r0); // Preserve value. __ push(r0); // Preserve value.
VisitForAccumulatorValue(prop->obj()); VisitForAccumulatorValue(prop->obj());
__ Move(StoreIC::ReceiverRegister(), r0); __ Move(StoreDescriptor::ReceiverRegister(), r0);
__ pop(StoreIC::ValueRegister()); // Restore value. __ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreIC::NameRegister(), __ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value())); Operand(prop->key()->AsLiteral()->value()));
CallStoreIC(); CallStoreIC();
break; break;
@ -2455,11 +2522,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ push(r0); // Preserve value. __ push(r0); // Preserve value.
VisitForStackValue(prop->obj()); VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key()); VisitForAccumulatorValue(prop->key());
__ Move(KeyedStoreIC::NameRegister(), r0); __ Move(StoreDescriptor::NameRegister(), r0);
__ Pop(KeyedStoreIC::ValueRegister(), KeyedStoreIC::ReceiverRegister()); __ Pop(StoreDescriptor::ValueRegister(),
Handle<Code> ic = strict_mode() == SLOPPY StoreDescriptor::ReceiverRegister());
? isolate()->builtins()->KeyedStoreIC_Initialize() Handle<Code> ic =
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic); CallIC(ic);
break; break;
} }
@ -2484,8 +2551,8 @@ void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) { if (var->IsUnallocated()) {
// Global var, const, or let. // Global var, const, or let.
__ mov(StoreIC::NameRegister(), Operand(var->name())); __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ ldr(StoreIC::ReceiverRegister(), GlobalObjectOperand()); __ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC(); CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) { } else if (op == Token::INIT_CONST_LEGACY) {
@ -2557,8 +2624,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call. // Record source code position before IC call.
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
__ mov(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value())); __ mov(StoreDescriptor::NameRegister(),
__ pop(StoreIC::ReceiverRegister()); Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId()); CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@ -2566,17 +2634,33 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
} }
void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
// Assignment to named property of super.
// r0 : value
// stack : receiver ('this'), home_object
Property* prop = expr->target()->AsProperty();
DCHECK(prop != NULL);
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
__ Push(r0);
__ Push(key->value());
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy),
4);
context()->Plug(r0);
}
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC. // Assignment to a property, using a keyed store IC.
// Record source code position before IC call. // Record source code position before IC call.
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
__ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister()); __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
DCHECK(KeyedStoreIC::ValueRegister().is(r0)); DCHECK(StoreDescriptor::ValueRegister().is(r0));
Handle<Code> ic = strict_mode() == SLOPPY Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic, expr->AssignmentFeedbackId()); CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@ -2589,16 +2673,23 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key(); Expression* key = expr->key();
if (key->IsPropertyName()) { if (key->IsPropertyName()) {
if (!expr->IsSuperAccess()) {
VisitForAccumulatorValue(expr->obj()); VisitForAccumulatorValue(expr->obj());
__ Move(LoadIC::ReceiverRegister(), r0); __ Move(LoadDescriptor::ReceiverRegister(), r0);
EmitNamedPropertyLoad(expr); EmitNamedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(expr->obj()->AsSuperReference());
__ Push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
PrepareForBailoutForId(expr->LoadId(), TOS_REG); PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(r0); context()->Plug(r0);
} else { } else {
VisitForStackValue(expr->obj()); VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key()); VisitForAccumulatorValue(expr->key());
__ Move(LoadIC::NameRegister(), r0); __ Move(LoadDescriptor::NameRegister(), r0);
__ pop(LoadIC::ReceiverRegister()); __ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr); EmitKeyedPropertyLoad(expr);
context()->Plug(r0); context()->Plug(r0);
} }
@ -2619,12 +2710,11 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression(); Expression* callee = expr->expression();
CallIC::CallType call_type = callee->IsVariableProxy() CallICState::CallType call_type =
? CallIC::FUNCTION callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
: CallIC::METHOD;
// Get the target function. // Get the target function.
if (call_type == CallIC::FUNCTION) { if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this); { StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy()); EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS); PrepareForBailout(callee, NO_REGISTERS);
@ -2635,7 +2725,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} else { } else {
// Load the function from the receiver. // Load the function from the receiver.
DCHECK(callee->IsProperty()); DCHECK(callee->IsProperty());
__ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); DCHECK(!callee->AsProperty()->IsSuperAccess());
__ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty()); EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver. // Push the target function under the receiver.
@ -2648,6 +2739,45 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} }
void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
const Register scratch = r1;
SuperReference* super_ref = prop->obj()->AsSuperReference();
EmitLoadHomeObject(super_ref);
__ Push(r0);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(r0);
__ Push(r0);
__ ldr(scratch, MemOperand(sp, kPointerSize * 2));
__ Push(scratch);
__ Push(key->value());
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
__ CallRuntime(Runtime::kLoadFromSuper, 3);
// Replace home_object with target function.
__ str(r0, MemOperand(sp, kPointerSize));
// Stack here:
// - target function
// - this (receiver)
EmitCall(expr, CallICState::METHOD);
}
// Code common for calls using the IC. // Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) { Expression* key) {
@ -2658,8 +2788,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver. // Load the function from the receiver.
DCHECK(callee->IsProperty()); DCHECK(callee->IsProperty());
__ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
__ Move(LoadIC::NameRegister(), r0); __ Move(LoadDescriptor::NameRegister(), r0);
EmitKeyedPropertyLoad(callee->AsProperty()); EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@ -2668,11 +2798,11 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ push(ip); __ push(ip);
__ str(r0, MemOperand(sp, kPointerSize)); __ str(r0, MemOperand(sp, kPointerSize));
EmitCall(expr, CallIC::METHOD); EmitCall(expr, CallICState::METHOD);
} }
void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) { void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments. // Load the arguments.
ZoneList<Expression*>* args = expr->arguments(); ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length(); int arg_count = args->length();
@ -2817,14 +2947,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCall(expr); EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) { } else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty(); Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder()); bool is_named_call = property->key()->IsPropertyName();
// super.x() is handled in EmitCallWithLoadIC.
if (property->IsSuperAccess() && is_named_call) {
EmitSuperCallWithLoadIC(expr);
} else {
{
PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
} }
if (property->key()->IsPropertyName()) { if (is_named_call) {
EmitCallWithLoadIC(expr); EmitCallWithLoadIC(expr);
} else { } else {
EmitKeyedCallWithLoadIC(expr, property->key()); EmitKeyedCallWithLoadIC(expr, property->key());
} }
}
} else { } else {
DCHECK(call_type == Call::OTHER_CALL); DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above. // Call to an arbitrary expression not handled specially above.
@ -3318,7 +3455,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'. // Functions have class 'Function'.
__ bind(&function); __ bind(&function);
__ LoadRoot(r0, Heap::kfunction_class_stringRootIndex); __ LoadRoot(r0, Heap::kFunction_stringRootIndex);
__ jmp(&done); __ jmp(&done);
// Objects with a non-function constructor have class 'Object'. // Objects with a non-function constructor have class 'Object'.
@ -3436,9 +3573,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
Register index = r1; Register index = r1;
Register value = r2; Register value = r2;
VisitForStackValue(args->at(1)); // index VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(2)); // value VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(0)); // string VisitForAccumulatorValue(args->at(2)); // string
__ Pop(index, value); __ Pop(index, value);
if (FLAG_debug_code) { if (FLAG_debug_code) {
@ -3469,9 +3606,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
Register index = r1; Register index = r1;
Register value = r2; Register value = r2;
VisitForStackValue(args->at(1)); // index VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(2)); // value VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(0)); // string VisitForAccumulatorValue(args->at(2)); // string
__ Pop(index, value); __ Pop(index, value);
if (FLAG_debug_code) { if (FLAG_debug_code) {
@ -3812,7 +3949,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
} }
void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
Label bailout, done, one_char_separator, long_separator, non_trivial_array, Label bailout, done, one_char_separator, long_separator, non_trivial_array,
not_size_one_array, loop, empty_separator_loop, one_char_separator_loop, not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
one_char_separator_loop_entry, long_separator_loop; one_char_separator_loop_entry, long_separator_loop;
@ -3859,7 +3996,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset)); __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
array = no_reg; // End of array's live range. array = no_reg; // End of array's live range.
// Check that all array elements are sequential ASCII strings, and // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths, as a smi-encoded value. // accumulate the sum of their lengths, as a smi-encoded value.
__ mov(string_length, Operand::Zero()); __ mov(string_length, Operand::Zero());
__ add(element, __ add(element,
@ -3875,14 +4012,14 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// elements_end: Array end. // elements_end: Array end.
if (generate_debug_code_) { if (generate_debug_code_) {
__ cmp(array_length, Operand::Zero()); __ cmp(array_length, Operand::Zero());
__ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin); __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
} }
__ bind(&loop); __ bind(&loop);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout); __ JumpIfSmi(string, &bailout);
__ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout); __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
__ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset)); __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch), SetCC); __ add(string_length, string_length, Operand(scratch), SetCC);
__ b(vs, &bailout); __ b(vs, &bailout);
@ -3903,11 +4040,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Sum of string lengths (smi). // string_length: Sum of string lengths (smi).
// elements: FixedArray of strings. // elements: FixedArray of strings.
// Check that the separator is a flat ASCII string. // Check that the separator is a flat one-byte string.
__ JumpIfSmi(separator, &bailout); __ JumpIfSmi(separator, &bailout);
__ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset)); __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout); __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
// Add (separator length times array_length) - separator length to the // Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not // string_length to get the length of the result string. array_length is not
@ -3936,9 +4073,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// separator: Separator string // separator: Separator string
// string_length: Length of result string (not smi) // string_length: Length of result string (not smi)
// array_length: Length of the array. // array_length: Length of the array.
__ AllocateAsciiString(result, __ AllocateOneByteString(result, string_length, scratch,
string_length,
scratch,
string, // used as scratch string, // used as scratch
elements_end, // used as scratch elements_end, // used as scratch
&bailout); &bailout);
@ -3980,7 +4115,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case // One-character separator case
__ bind(&one_char_separator); __ bind(&one_char_separator);
// Replace separator with its ASCII character value. // Replace separator with its one-byte character value.
__ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize)); __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first // Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator // element is not preceded by a separator
@ -3991,7 +4126,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// result_pos: the position to which we are currently copying characters. // result_pos: the position to which we are currently copying characters.
// element: Current array element. // element: Current array element.
// elements_end: Array end. // elements_end: Array end.
// separator: Single separator ASCII char (in lower byte). // separator: Single separator one-byte char (in lower byte).
// Copy the separator character to the result. // Copy the separator character to the result.
__ strb(separator, MemOperand(result_pos, 1, PostIndex)); __ strb(separator, MemOperand(result_pos, 1, PostIndex));
@ -4072,15 +4207,15 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) { if (expr->is_jsruntime()) {
// Push the builtins object as the receiver. // Push the builtins object as the receiver.
Register receiver = LoadIC::ReceiverRegister(); Register receiver = LoadDescriptor::ReceiverRegister();
__ ldr(receiver, GlobalObjectOperand()); __ ldr(receiver, GlobalObjectOperand());
__ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset)); __ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
__ push(receiver); __ push(receiver);
// Load the function from the receiver. // Load the function from the receiver.
__ mov(LoadIC::NameRegister(), Operand(expr->name())); __ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ mov(LoadIC::SlotRegister(), __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot()))); Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
CallLoadIC(NOT_CONTEXTUAL); CallLoadIC(NOT_CONTEXTUAL);
} else { } else {
@ -4247,6 +4382,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (prop != NULL) { if (prop != NULL) {
assign_type = assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY; (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
if (prop->IsSuperAccess()) {
// throw exception.
VisitSuperReference(prop->obj()->AsSuperReference());
return;
}
} }
// Evaluate expression and get value. // Evaluate expression and get value.
@ -4263,13 +4403,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == NAMED_PROPERTY) { if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register. // Put the object both on the stack and in the register.
VisitForStackValue(prop->obj()); VisitForStackValue(prop->obj());
__ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(prop); EmitNamedPropertyLoad(prop);
} else { } else {
VisitForStackValue(prop->obj()); VisitForStackValue(prop->obj());
VisitForStackValue(prop->key()); VisitForStackValue(prop->key());
__ ldr(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); __ ldr(LoadDescriptor::ReceiverRegister(),
__ ldr(LoadIC::NameRegister(), MemOperand(sp, 0)); MemOperand(sp, 1 * kPointerSize));
__ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
EmitKeyedPropertyLoad(prop); EmitKeyedPropertyLoad(prop);
} }
} }
@ -4349,8 +4490,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Record position before stub call. // Record position before stub call.
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE); Handle<Code> code =
CallIC(stub.GetCode(), expr->CountBinOpFeedbackId()); CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
__ bind(&done); __ bind(&done);
@ -4377,9 +4519,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} }
break; break;
case NAMED_PROPERTY: { case NAMED_PROPERTY: {
__ mov(StoreIC::NameRegister(), __ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value())); Operand(prop->key()->AsLiteral()->value()));
__ pop(StoreIC::ReceiverRegister()); __ pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId()); CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) { if (expr->is_postfix()) {
@ -4392,10 +4534,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break; break;
} }
case KEYED_PROPERTY: { case KEYED_PROPERTY: {
__ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::NameRegister()); __ Pop(StoreDescriptor::ReceiverRegister(),
Handle<Code> ic = strict_mode() == SLOPPY StoreDescriptor::NameRegister());
? isolate()->builtins()->KeyedStoreIC_Initialize() Handle<Code> ic =
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->CountStoreFeedbackId()); CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) { if (expr->is_postfix()) {
@ -4417,10 +4559,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy(); VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) { if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "[ Global variable"); Comment cmnt(masm_, "[ Global variable");
__ ldr(LoadIC::ReceiverRegister(), GlobalObjectOperand()); __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
__ mov(LoadIC::NameRegister(), Operand(proxy->name())); __ mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ mov(LoadIC::SlotRegister(), __ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(Smi::FromInt(proxy->VariableFeedbackSlot()))); Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
} }
// Use a regular load, not a contextual load, to avoid a reference // Use a regular load, not a contextual load, to avoid a reference
@ -4585,7 +4727,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC. // Record position and call the compare IC.
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId()); CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@ -4785,14 +4927,35 @@ static Address GetInterruptImmediateLoadAddress(Address pc) {
DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address))); DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
} else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) { } else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
// This is an extended constant pool lookup. // This is an extended constant pool lookup.
if (CpuFeatures::IsSupported(ARMv7)) {
load_address -= 2 * Assembler::kInstrSize; load_address -= 2 * Assembler::kInstrSize;
DCHECK(Assembler::IsMovW(Memory::int32_at(load_address))); DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
DCHECK(Assembler::IsMovT( DCHECK(Assembler::IsMovT(
Memory::int32_at(load_address + Assembler::kInstrSize))); Memory::int32_at(load_address + Assembler::kInstrSize)));
} else if (Assembler::IsMovT(Memory::int32_at(load_address))) { } else {
// This is a movw_movt immediate load. load_address -= 4 * Assembler::kInstrSize;
DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
DCHECK(Assembler::IsOrrImmed(
Memory::int32_at(load_address + Assembler::kInstrSize)));
DCHECK(Assembler::IsOrrImmed(
Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
DCHECK(Assembler::IsOrrImmed(
Memory::int32_at(load_address + 3 * Assembler::kInstrSize)));
}
} else if (CpuFeatures::IsSupported(ARMv7) &&
Assembler::IsMovT(Memory::int32_at(load_address))) {
// This is a movw / movt immediate load.
load_address -= Assembler::kInstrSize; load_address -= Assembler::kInstrSize;
DCHECK(Assembler::IsMovW(Memory::int32_at(load_address))); DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
} else if (!CpuFeatures::IsSupported(ARMv7) &&
Assembler::IsOrrImmed(Memory::int32_at(load_address))) {
// This is a mov / orr immediate load.
load_address -= 3 * Assembler::kInstrSize;
DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
DCHECK(Assembler::IsOrrImmed(
Memory::int32_at(load_address + Assembler::kInstrSize)));
DCHECK(Assembler::IsOrrImmed(
Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
} else { } else {
// This is a small constant pool lookup. // This is a small constant pool lookup.
DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address))); DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
@ -4813,11 +4976,17 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
{ {
// <decrement profiling counter> // <decrement profiling counter>
// bpl ok // bpl ok
// ; load interrupt stub address into ip - either of: // ; load interrupt stub address into ip - either of (for ARMv7):
// ; <small cp load> | <extended cp load> | <immediate load> // ; <small cp load> | <extended cp load> | <immediate load>
// ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm // ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm
// | movt ip, #imm> | movw ip, #imm // | movt ip, #imm | movw ip, #imm
// | ldr ip, [pp, ip] // | ldr ip, [pp, ip]
// ; or (for ARMv6):
// ; <small cp load> | <extended cp load> | <immediate load>
// ldr ip, [pc/pp, #imm] | mov ip, #imm | mov ip, #imm
// | orr ip, ip, #imm> | orr ip, ip, #imm
// | orr ip, ip, #imm> | orr ip, ip, #imm
// | orr ip, ip, #imm> | orr ip, ip, #imm
// blx ip // blx ip
// <reset profiling counter> // <reset profiling counter>
// ok-label // ok-label
@ -4834,11 +5003,17 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code,
case OSR_AFTER_STACK_CHECK: case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter> // <decrement profiling counter>
// mov r0, r0 (NOP) // mov r0, r0 (NOP)
// ; load on-stack replacement address into ip - either of: // ; load on-stack replacement address into ip - either of (for ARMv7):
// ; <small cp load> | <extended cp load> | <immediate load> // ; <small cp load> | <extended cp load> | <immediate load>
// ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm // ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm
// | movt ip, #imm> | movw ip, #imm // | movt ip, #imm> | movw ip, #imm
// | ldr ip, [pp, ip] // | ldr ip, [pp, ip]
// ; or (for ARMv6):
// ; <small cp load> | <extended cp load> | <immediate load>
// ldr ip, [pc/pp, #imm] | mov ip, #imm | mov ip, #imm
// | orr ip, ip, #imm | orr ip, ip, #imm
// | orr ip, ip, #imm | orr ip, ip, #imm
// | orr ip, ip, #imm | orr ip, ip, #imm
// blx ip // blx ip
// <reset profiling counter> // <reset profiling counter>
// ok-label // ok-label

View File

@ -0,0 +1,323 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return r1; }
const Register LoadDescriptor::NameRegister() { return r2; }
const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r0; }
const Register VectorLoadICDescriptor::VectorRegister() { return r3; }
const Register StoreDescriptor::ReceiverRegister() { return r1; }
const Register StoreDescriptor::NameRegister() { return r2; }
const Register StoreDescriptor::ValueRegister() { return r0; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r3; }
const Register InstanceofDescriptor::left() { return r0; }
const Register InstanceofDescriptor::right() { return r1; }
const Register ArgumentsAccessReadDescriptor::index() { return r1; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return r0; }
const Register ApiGetterDescriptor::function_address() { return r2; }
const Register MathPowTaggedDescriptor::exponent() { return r2; }
const Register MathPowIntegerDescriptor::exponent() {
return MathPowTaggedDescriptor::exponent();
}
void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2};
data->Initialize(arraysize(registers), registers, NULL);
}
void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1};
data->Initialize(arraysize(registers), registers, NULL);
}
void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void FastCloneShallowArrayDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3, r2, r1};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
Representation::Tagged()};
data->Initialize(arraysize(registers), registers, representations);
}
void FastCloneShallowObjectDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3, r2, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void CreateAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r3};
data->Initialize(arraysize(registers), registers, NULL);
}
void StoreArrayLiteralElementDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r3, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1};
data->Initialize(arraysize(registers), registers, NULL);
}
void CallFunctionWithFeedbackDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r3};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Smi()};
data->Initialize(arraysize(registers), registers, representations);
}
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// r0 : number of arguments
// r1 : the function to call
// r2 : feedback vector
// r3 : (only if r2 is not the megamorphic symbol) slot in feedback
// vector (Smi)
// TODO(turbofan): So far we don't gather type feedback and hence skip the
// slot parameter, but ArrayConstructStub needs the vector to be undefined.
Register registers[] = {cp, r0, r1, r2};
data->Initialize(arraysize(registers), registers, NULL);
}
void RegExpConstructResultDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void TransitionElementsKindDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0, r1};
data->Initialize(arraysize(registers), registers, NULL);
}
void ArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// register state
// cp -- context
// r0 -- number of arguments
// r1 -- function
// r2 -- allocation site with elements kind
Register registers[] = {cp, r1, r2};
data->Initialize(arraysize(registers), registers, NULL);
}
void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {cp, r1, r2, r0};
Representation representations[] = {
Representation::Tagged(), Representation::Tagged(),
Representation::Tagged(), Representation::Integer32()};
data->Initialize(arraysize(registers), registers, representations);
}
void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// register state
// cp -- context
// r0 -- number of arguments
// r1 -- constructor function
Register registers[] = {cp, r1};
data->Initialize(arraysize(registers), registers, NULL);
}
void InternalArrayConstructorDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
// stack param count needs (constructor pointer, and single argument)
Register registers[] = {cp, r1, r0};
Representation representations[] = {Representation::Tagged(),
Representation::Tagged(),
Representation::Integer32()};
data->Initialize(arraysize(registers), registers, representations);
}
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void BinaryOpWithAllocationSiteDescriptor::Initialize(
CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r2, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
Register registers[] = {cp, r1, r0};
data->Initialize(arraysize(registers), registers, NULL);
}
void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r2, // key
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // key
};
data->Initialize(arraysize(registers), registers, representations,
&noInlineDescriptor);
}
void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor noInlineDescriptor =
PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r2, // name
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // name
};
data->Initialize(arraysize(registers), registers, representations,
&noInlineDescriptor);
}
void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r0, // receiver
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // receiver
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
}
void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r1, // JSFunction
r0, // actual number of arguments
r2, // expected number of arguments
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // JSFunction
Representation::Integer32(), // actual number of arguments
Representation::Integer32(), // expected number of arguments
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
}
void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
static PlatformInterfaceDescriptor default_descriptor =
PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
Register registers[] = {
cp, // context
r0, // callee
r4, // call_data
r2, // holder
r1, // api_function_address
};
Representation representations[] = {
Representation::Tagged(), // context
Representation::Tagged(), // callee
Representation::Tagged(), // call_data
Representation::Tagged(), // holder
Representation::External(), // api_function_address
};
data->Initialize(arraysize(registers), registers, representations,
&default_descriptor);
}
}
} // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

View File

@ -0,0 +1,26 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
#define V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
class PlatformInterfaceDescriptor {
public:
explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) {}
TargetAddressStorageMode storage_mode() { return storage_mode_; }
private:
TargetAddressStorageMode storage_mode_;
};
}
} // namespace v8::internal
#endif // V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_

View File

@ -423,12 +423,6 @@ LPlatformChunk* LChunkBuilder::Build() {
} }
void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER, return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg)); Register::ToAllocationIndex(reg));
@ -1082,14 +1076,14 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor( LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) { HCallWithDescriptor* instr) {
const InterfaceDescriptor* descriptor = instr->descriptor(); CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target()); LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone()); ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone()); ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) { for (int i = 1; i < instr->OperandCount(); i++) {
LOperand* op = UseFixed(instr->OperandAt(i), LOperand* op =
descriptor->GetParameterRegister(i - 1)); UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
ops.Add(op, zone()); ops.Add(op, zone());
} }
@ -1099,6 +1093,19 @@ LInstruction* LChunkBuilder::DoCallWithDescriptor(
} }
LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
HTailCallThroughMegamorphicCache* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* receiver_register =
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* function = UseFixed(instr->function(), r1); LOperand* function = UseFixed(instr->function(), r1);
@ -1678,9 +1685,10 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Representation exponent_type = instr->right()->representation(); Representation exponent_type = instr->right()->representation();
DCHECK(instr->left()->representation().IsDouble()); DCHECK(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), d0); LOperand* left = UseFixedDouble(instr->left(), d0);
LOperand* right = exponent_type.IsDouble() ? LOperand* right =
UseFixedDouble(instr->right(), d1) : exponent_type.IsDouble()
UseFixed(instr->right(), r2); ? UseFixedDouble(instr->right(), d1)
: UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
LPower* result = new(zone()) LPower(left, right); LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d2), return MarkAsCall(DefineFixedDouble(result, d2),
instr, instr,
@ -2102,11 +2110,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), LOperand* global_object =
LoadIC::ReceiverRegister()); UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL; LOperand* vector = NULL;
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
vector = FixedTemp(LoadIC::VectorRegister()); vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
} }
LLoadGlobalGeneric* result = LLoadGlobalGeneric* result =
new(zone()) LLoadGlobalGeneric(context, global_object, vector); new(zone()) LLoadGlobalGeneric(context, global_object, vector);
@ -2161,10 +2169,11 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL; LOperand* vector = NULL;
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
vector = FixedTemp(LoadIC::VectorRegister()); vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
} }
LInstruction* result = LInstruction* result =
@ -2226,11 +2235,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); LOperand* object =
LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister()); UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL; LOperand* vector = NULL;
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
vector = FixedTemp(LoadIC::VectorRegister()); vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
} }
LInstruction* result = LInstruction* result =
@ -2286,9 +2296,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), KeyedStoreIC::ReceiverRegister()); LOperand* obj =
LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister()); UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), KeyedStoreIC::ValueRegister()); LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
DCHECK(instr->object()->representation().IsTagged()); DCHECK(instr->object()->representation().IsTagged());
DCHECK(instr->key()->representation().IsTagged()); DCHECK(instr->key()->representation().IsTagged());
@ -2345,7 +2356,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
} }
LOperand* val; LOperand* val;
if (needs_write_barrier || instr->field_representation().IsSmi()) { if (needs_write_barrier) {
val = UseTempRegister(instr->value()); val = UseTempRegister(instr->value());
} else if (instr->field_representation().IsDouble()) { } else if (instr->field_representation().IsDouble()) {
val = UseRegisterAtStart(instr->value()); val = UseRegisterAtStart(instr->value());
@ -2362,8 +2373,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* obj = UseFixed(instr->object(), StoreIC::ReceiverRegister()); LOperand* obj =
LOperand* val = UseFixed(instr->value(), StoreIC::ValueRegister()); UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val); LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
return MarkAsCall(result, instr); return MarkAsCall(result, instr);
@ -2439,10 +2451,10 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index); return DefineAsSpilled(result, spill_index);
} else { } else {
DCHECK(info()->IsStub()); DCHECK(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor = CallInterfaceDescriptor descriptor =
info()->code_stub()->GetInterfaceDescriptor(); info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index()); int index = static_cast<int>(instr->index());
Register reg = descriptor->GetEnvironmentParameterRegister(index); Register reg = descriptor.GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg); return DefineFixed(result, reg);
} }
} }
@ -2458,7 +2470,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
} else { } else {
spill_index = env_index - instr->environment()->first_local_index(); spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) { if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort(kTooManySpillSlotsNeededForOSR); Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0; spill_index = 0;
} }
} }
@ -2555,6 +2567,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) { if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object()); inner->Bind(instr->arguments_var(), instr->arguments_object());
} }
inner->BindContext(instr->closure_context());
inner->set_entry(instr); inner->set_entry(instr);
current_block_->UpdateEnvironment(inner); current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure()); chunk_->AddInlinedClosure(instr->closure());

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -26,7 +26,7 @@ class LCodeGen: public LCodeGenBase {
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info), : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()), deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()), jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()), deoptimization_literals_(8, info->zone()),
inlined_function_count_(0), inlined_function_count_(0),
scope_(info->scope()), scope_(info->scope()),
@ -169,10 +169,10 @@ class LCodeGen: public LCodeGenBase {
// Code generation passes. Returns true if code generation should // Code generation passes. Returns true if code generation should
// continue. // continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE; void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
bool GeneratePrologue(); bool GeneratePrologue();
bool GenerateDeferredCode(); bool GenerateDeferredCode();
bool GenerateDeoptJumpTable(); bool GenerateJumpTable();
bool GenerateSafepointTable(); bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset. // Generates the custom OSR entrypoint and sets the osr_pc_offset.
@ -234,10 +234,10 @@ class LCodeGen: public LCodeGenBase {
void RegisterEnvironmentForDeoptimization(LEnvironment* environment, void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode); Safepoint::DeoptMode mode);
void DeoptimizeIf(Condition condition, void DeoptimizeIf(Condition condition, LInstruction* instr,
LEnvironment* environment, const char* detail, Deoptimizer::BailoutType bailout_type);
Deoptimizer::BailoutType bailout_type); void DeoptimizeIf(Condition condition, LInstruction* instr,
void DeoptimizeIf(Condition condition, LEnvironment* environment); const char* detail);
void AddToTranslation(LEnvironment* environment, void AddToTranslation(LEnvironment* environment,
Translation* translation, Translation* translation,
@ -271,7 +271,7 @@ class LCodeGen: public LCodeGenBase {
int arguments, int arguments,
Safepoint::DeoptMode mode); Safepoint::DeoptMode mode);
void RecordAndWritePosition(int position) V8_OVERRIDE; void RecordAndWritePosition(int position) OVERRIDE;
static Condition TokenToCondition(Token::Value op, bool is_unsigned); static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block); void EmitGoto(int block);
@ -281,12 +281,8 @@ class LCodeGen: public LCodeGenBase {
void EmitBranch(InstrType instr, Condition condition); void EmitBranch(InstrType instr, Condition condition);
template<class InstrType> template<class InstrType>
void EmitFalseBranch(InstrType instr, Condition condition); void EmitFalseBranch(InstrType instr, Condition condition);
void EmitNumberUntagD(Register input, void EmitNumberUntagD(LNumberUntagD* instr, Register input,
DwVfpRegister result, DwVfpRegister result, NumberUntagDMode mode);
bool allow_undefined_as_nan,
bool deoptimize_on_minus_zero,
LEnvironment* env,
NumberUntagDMode mode);
// Emits optimized code for typeof x == "y". Modifies input register. // Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to // Returns the condition on which a final split to
@ -324,7 +320,7 @@ class LCodeGen: public LCodeGenBase {
int* offset, int* offset,
AllocationSiteMode mode); AllocationSiteMode mode);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE; void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
void DoLoadKeyedExternalArray(LLoadKeyed* instr); void DoLoadKeyedExternalArray(LLoadKeyed* instr);
void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
void DoLoadKeyedFixedArray(LLoadKeyed* instr); void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@ -332,8 +328,11 @@ class LCodeGen: public LCodeGenBase {
void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
void DoStoreKeyedFixedArray(LStoreKeyed* instr); void DoStoreKeyedFixedArray(LStoreKeyed* instr);
template <class T>
void EmitVectorLoadICRegisters(T* instr);
ZoneList<LEnvironment*> deoptimizations_; ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_; ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_; ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_; int inlined_function_count_;
Scope* const scope_; Scope* const scope_;
@ -351,7 +350,7 @@ class LCodeGen: public LCodeGenBase {
Safepoint::Kind expected_safepoint_kind_; Safepoint::Kind expected_safepoint_kind_;
class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED { class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
public: public:
explicit PushSafepointRegistersScope(LCodeGen* codegen) explicit PushSafepointRegistersScope(LCodeGen* codegen)
: codegen_(codegen) { : codegen_(codegen) {

View File

@ -15,7 +15,7 @@ namespace internal {
class LCodeGen; class LCodeGen;
class LGapResolver; class LGapResolver;
class LGapResolver V8_FINAL BASE_EMBEDDED { class LGapResolver FINAL BASE_EMBEDDED {
public: public:
explicit LGapResolver(LCodeGen* owner); explicit LGapResolver(LCodeGen* owner);

View File

@ -8,12 +8,14 @@
#if V8_TARGET_ARCH_ARM #if V8_TARGET_ARCH_ARM
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h" #include "src/bootstrapper.h"
#include "src/codegen.h" #include "src/codegen.h"
#include "src/cpu-profiler.h" #include "src/cpu-profiler.h"
#include "src/debug.h" #include "src/debug.h"
#include "src/isolate-inl.h" #include "src/isolate-inl.h"
#include "src/runtime.h" #include "src/runtime/runtime.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -270,7 +272,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
} else if (!(src2.instructions_required(this) == 1) && } else if (!(src2.instructions_required(this) == 1) &&
!src2.must_output_reloc_info(this) && !src2.must_output_reloc_info(this) &&
CpuFeatures::IsSupported(ARMv7) && CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) { base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
ubfx(dst, src1, 0, ubfx(dst, src1, 0,
WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond); WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
} else { } else {
@ -498,8 +500,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag // Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors. // turned on to provoke errors.
if (emit_debug_code()) { if (emit_debug_code()) {
mov(value, Operand(BitCast<int32_t>(kZapValue + 4))); mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
mov(dst, Operand(BitCast<int32_t>(kZapValue + 8))); mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
} }
} }
@ -568,8 +570,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
// Clobber clobbered registers when running with the debug-code flag // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors. // turned on to provoke errors.
if (emit_debug_code()) { if (emit_debug_code()) {
mov(dst, Operand(BitCast<int32_t>(kZapValue + 12))); mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
mov(map, Operand(BitCast<int32_t>(kZapValue + 16))); mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
} }
} }
@ -640,8 +642,8 @@ void MacroAssembler::RecordWrite(
// Clobber clobbered registers when running with the debug-code flag // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors. // turned on to provoke errors.
if (emit_debug_code()) { if (emit_debug_code()) {
mov(address, Operand(BitCast<int32_t>(kZapValue + 12))); mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
mov(value, Operand(BitCast<int32_t>(kZapValue + 16))); mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
} }
} }
@ -677,8 +679,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Ret(eq); Ret(eq);
} }
push(lr); push(lr);
StoreBufferOverflowStub store_buffer_overflow = StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
StoreBufferOverflowStub(isolate(), fp_mode);
CallStub(&store_buffer_overflow); CallStub(&store_buffer_overflow);
pop(lr); pop(lr);
bind(&done); bind(&done);
@ -1075,7 +1076,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
sub(sp, sp, Operand((stack_space + 1) * kPointerSize)); sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
if (frame_alignment > 0) { if (frame_alignment > 0) {
DCHECK(IsPowerOf2(frame_alignment)); DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
and_(sp, sp, Operand(-frame_alignment)); and_(sp, sp, Operand(-frame_alignment));
} }
@ -1989,10 +1990,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
} }
void MacroAssembler::AllocateAsciiString(Register result, void MacroAssembler::AllocateOneByteString(Register result, Register length,
Register length, Register scratch1, Register scratch2,
Register scratch1,
Register scratch2,
Register scratch3, Register scratch3,
Label* gc_required) { Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while // Calculate the number of bytes needed for the characters in the string while
@ -2003,7 +2002,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space. // Allocate one-byte string in new space.
Allocate(scratch1, Allocate(scratch1,
result, result,
scratch2, scratch2,
@ -2012,11 +2011,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
TAG_OBJECT); TAG_OBJECT);
// Set the map, length and hash field. // Set the map, length and hash field.
InitializeNewString(result, InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
length, scratch1, scratch2);
Heap::kAsciiStringMapRootIndex,
scratch1,
scratch2);
} }
@ -2036,8 +2032,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
} }
void MacroAssembler::AllocateAsciiConsString(Register result, void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register length,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Label* gc_required) { Label* gc_required) {
@ -2048,11 +2043,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
gc_required, gc_required,
TAG_OBJECT); TAG_OBJECT);
InitializeNewString(result, InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
length, scratch1, scratch2);
Heap::kConsAsciiStringMapRootIndex,
scratch1,
scratch2);
} }
@ -2072,7 +2064,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
} }
void MacroAssembler::AllocateAsciiSlicedString(Register result, void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register length, Register length,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
@ -2080,11 +2072,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT); TAG_OBJECT);
InitializeNewString(result, InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
length, scratch1, scratch2);
Heap::kSlicedAsciiStringMapRootIndex,
scratch1,
scratch2);
} }
@ -3183,27 +3172,21 @@ void MacroAssembler::LookupNumberStringCache(Register object,
} }
void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
Register first, Register first, Register second, Register scratch1, Register scratch2,
Register second,
Register scratch1,
Register scratch2,
Label* failure) { Label* failure) {
// Test that both first and second are sequential ASCII strings. // Test that both first and second are sequential one-byte strings.
// Assume that they are non-smis. // Assume that they are non-smis.
ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
scratch2, scratch2, failure);
scratch1,
scratch2,
failure);
} }
void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
Register second, Register second,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
@ -3211,15 +3194,12 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
// Check that neither is a smi. // Check that neither is a smi.
and_(scratch1, first, Operand(second)); and_(scratch1, first, Operand(second));
JumpIfSmi(scratch1, failure); JumpIfSmi(scratch1, failure);
JumpIfNonSmisNotBothSequentialAsciiStrings(first, JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
second, scratch2, failure);
scratch1,
scratch2,
failure);
} }
void MacroAssembler::JumpIfNotUniqueName(Register reg, void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
Label* not_unique_name) { Label* not_unique_name) {
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
Label succeed; Label succeed;
@ -3385,34 +3365,31 @@ void MacroAssembler::RestoreFPRegs(Register location, Register scratch) {
} }
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register first, Register second, Register scratch1, Register scratch2,
Register second,
Register scratch1,
Register scratch2,
Label* failure) { Label* failure) {
const int kFlatAsciiStringMask = const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatAsciiStringTag = const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag; kStringTag | kOneByteStringTag | kSeqStringTag;
and_(scratch1, first, Operand(kFlatAsciiStringMask)); and_(scratch1, first, Operand(kFlatOneByteStringMask));
and_(scratch2, second, Operand(kFlatAsciiStringMask)); and_(scratch2, second, Operand(kFlatOneByteStringMask));
cmp(scratch1, Operand(kFlatAsciiStringTag)); cmp(scratch1, Operand(kFlatOneByteStringTag));
// Ignore second test if first test failed. // Ignore second test if first test failed.
cmp(scratch2, Operand(kFlatAsciiStringTag), eq); cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
b(ne, failure); b(ne, failure);
} }
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
Register scratch, Register scratch,
Label* failure) { Label* failure) {
const int kFlatAsciiStringMask = const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatAsciiStringTag = const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag; kStringTag | kOneByteStringTag | kSeqStringTag;
and_(scratch, type, Operand(kFlatAsciiStringMask)); and_(scratch, type, Operand(kFlatOneByteStringMask));
cmp(scratch, Operand(kFlatAsciiStringTag)); cmp(scratch, Operand(kFlatOneByteStringTag));
b(ne, failure); b(ne, failure);
} }
@ -3489,7 +3466,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
// and the original value of sp. // and the original value of sp.
mov(scratch, sp); mov(scratch, sp);
sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
DCHECK(IsPowerOf2(frame_alignment)); DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
and_(sp, sp, Operand(-frame_alignment)); and_(sp, sp, Operand(-frame_alignment));
str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
} else { } else {
@ -3568,7 +3545,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1; int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) { if (frame_alignment > kPointerSize) {
DCHECK(IsPowerOf2(frame_alignment)); DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
Label alignment_as_expected; Label alignment_as_expected;
tst(sp, Operand(frame_alignment_mask)); tst(sp, Operand(frame_alignment_mask));
b(eq, &alignment_as_expected); b(eq, &alignment_as_expected);
@ -3826,8 +3803,8 @@ void MacroAssembler::EnsureNotWhite(
mov(length, Operand(ExternalString::kSize), LeaveCC, ne); mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
b(ne, &is_data_object); b(ne, &is_data_object);
// Sequential string, either ASCII or UC16. // Sequential string, either Latin1 or UC16.
// For ASCII (char-size of 1) we shift the smi tag away to get the length. // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2. // getting the length multiplied by 2.
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
@ -4093,16 +4070,18 @@ void MacroAssembler::TruncatingDiv(Register result,
DCHECK(!dividend.is(result)); DCHECK(!dividend.is(result));
DCHECK(!dividend.is(ip)); DCHECK(!dividend.is(ip));
DCHECK(!result.is(ip)); DCHECK(!result.is(ip));
MultiplierAndShift ms(divisor); base::MagicNumbersForDivision<uint32_t> mag =
mov(ip, Operand(ms.multiplier())); base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
mov(ip, Operand(mag.multiplier));
smull(ip, result, dividend, ip); smull(ip, result, dividend, ip);
if (divisor > 0 && ms.multiplier() < 0) { bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
if (divisor > 0 && neg) {
add(result, result, Operand(dividend)); add(result, result, Operand(dividend));
} }
if (divisor < 0 && ms.multiplier() > 0) { if (divisor < 0 && !neg && mag.multiplier > 0) {
sub(result, result, Operand(dividend)); sub(result, result, Operand(dividend));
} }
if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
add(result, result, Operand(dividend, LSR, 31)); add(result, result, Operand(dividend, LSR, 31));
} }

View File

@ -6,6 +6,7 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "src/assembler.h" #include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/frames.h" #include "src/frames.h"
#include "src/globals.h" #include "src/globals.h"
@ -152,8 +153,11 @@ class MacroAssembler: public Assembler {
// Register move. May do nothing if the registers are identical. // Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value); void Move(Register dst, Handle<Object> value);
void Move(Register dst, Register src, Condition cond = al); void Move(Register dst, Register src, Condition cond = al);
void Move(Register dst, const Operand& src, Condition cond = al) { void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
if (!src.is_reg() || !src.rm().is(dst)) mov(dst, src, LeaveCC, cond); Condition cond = al) {
if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
mov(dst, src, sbit, cond);
}
} }
void Move(DwVfpRegister dst, DwVfpRegister src); void Move(DwVfpRegister dst, DwVfpRegister src);
@ -750,31 +754,24 @@ class MacroAssembler: public Assembler {
Register scratch2, Register scratch2,
Register scratch3, Register scratch3,
Label* gc_required); Label* gc_required);
void AllocateAsciiString(Register result, void AllocateOneByteString(Register result, Register length,
Register length, Register scratch1, Register scratch2,
Register scratch1, Register scratch3, Label* gc_required);
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateTwoByteConsString(Register result, void AllocateTwoByteConsString(Register result,
Register length, Register length,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Label* gc_required); Label* gc_required);
void AllocateAsciiConsString(Register result, void AllocateOneByteConsString(Register result, Register length,
Register length, Register scratch1, Register scratch2,
Register scratch1,
Register scratch2,
Label* gc_required); Label* gc_required);
void AllocateTwoByteSlicedString(Register result, void AllocateTwoByteSlicedString(Register result,
Register length, Register length,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Label* gc_required); Label* gc_required);
void AllocateAsciiSlicedString(Register result, void AllocateOneByteSlicedString(Register result, Register length,
Register length, Register scratch1, Register scratch2,
Register scratch1,
Register scratch2,
Label* gc_required); Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young // Allocates a heap number or jumps to the gc_required label if the young
@ -1318,38 +1315,33 @@ class MacroAssembler: public Assembler {
Register scratch3, Register scratch3,
Label* not_found); Label* not_found);
// Checks if both objects are sequential ASCII strings and jumps to label // Checks if both objects are sequential one-byte strings and jumps to label
// if either is not. Assumes that neither object is a smi. // if either is not. Assumes that neither object is a smi.
void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1, void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
Register object2, Register object2,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Label* failure); Label* failure);
// Checks if both objects are sequential ASCII strings and jumps to label // Checks if both objects are sequential one-byte strings and jumps to label
// if either is not. // if either is not.
void JumpIfNotBothSequentialAsciiStrings(Register first, void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
Register second,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Label* not_flat_ascii_strings); Label* not_flat_one_byte_strings);
// Checks if both instance types are sequential ASCII strings and jumps to // Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not. // label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialAscii( void JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first_object_instance_type, Register first_object_instance_type, Register second_object_instance_type,
Register second_object_instance_type, Register scratch1, Register scratch2, Label* failure);
Register scratch1,
Register scratch2,
Label* failure);
// Check if instance type is sequential ASCII string and jump to label if // Check if instance type is sequential one-byte string and jump to label if
// it is not. // it is not.
void JumpIfInstanceTypeIsNotSequentialAscii(Register type, void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
Register scratch,
Label* failure); Label* failure);
void JumpIfNotUniqueName(Register reg, Label* not_unique_name); void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
void EmitSeqStringSetCharCheck(Register string, void EmitSeqStringSetCharCheck(Register string,
Register index, Register index,

View File

@ -238,7 +238,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ cmn(r1, Operand(current_input_offset())); __ cmn(r1, Operand(current_input_offset()));
BranchOrBacktrack(gt, on_no_match); BranchOrBacktrack(gt, on_no_match);
if (mode_ == ASCII) { if (mode_ == LATIN1) {
Label success; Label success;
Label fail; Label fail;
Label loop_check; Label loop_check;
@ -354,7 +354,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReference(
Label loop; Label loop;
__ bind(&loop); __ bind(&loop);
if (mode_ == ASCII) { if (mode_ == LATIN1) {
__ ldrb(r3, MemOperand(r0, char_size(), PostIndex)); __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
__ ldrb(r4, MemOperand(r2, char_size(), PostIndex)); __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
} else { } else {
@ -443,7 +443,7 @@ void RegExpMacroAssemblerARM::CheckBitInTable(
Handle<ByteArray> table, Handle<ByteArray> table,
Label* on_bit_set) { Label* on_bit_set) {
__ mov(r0, Operand(table)); __ mov(r0, Operand(table));
if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) { if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
__ and_(r1, current_character(), Operand(kTableSize - 1)); __ and_(r1, current_character(), Operand(kTableSize - 1));
__ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag)); __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
} else { } else {
@ -464,7 +464,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
switch (type) { switch (type) {
case 's': case 's':
// Match space-characters // Match space-characters
if (mode_ == ASCII) { if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0. // One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success; Label success;
__ cmp(current_character(), Operand(' ')); __ cmp(current_character(), Operand(' '));
@ -518,7 +518,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(r0, r0, Operand(0x0b)); __ sub(r0, r0, Operand(0x0b));
__ cmp(r0, Operand(0x0c - 0x0b)); __ cmp(r0, Operand(0x0c - 0x0b));
if (mode_ == ASCII) { if (mode_ == LATIN1) {
BranchOrBacktrack(hi, on_no_match); BranchOrBacktrack(hi, on_no_match);
} else { } else {
Label done; Label done;
@ -534,8 +534,8 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
return true; return true;
} }
case 'w': { case 'w': {
if (mode_ != ASCII) { if (mode_ != LATIN1) {
// Table is 128 entries, so all ASCII characters can be tested. // Table is 256 entries, so all Latin1 characters can be tested.
__ cmp(current_character(), Operand('z')); __ cmp(current_character(), Operand('z'));
BranchOrBacktrack(hi, on_no_match); BranchOrBacktrack(hi, on_no_match);
} }
@ -548,8 +548,8 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
} }
case 'W': { case 'W': {
Label done; Label done;
if (mode_ != ASCII) { if (mode_ != LATIN1) {
// Table is 128 entries, so all ASCII characters can be tested. // Table is 256 entries, so all Latin1 characters can be tested.
__ cmp(current_character(), Operand('z')); __ cmp(current_character(), Operand('z'));
__ b(hi, &done); __ b(hi, &done);
} }
@ -558,7 +558,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
__ ldrb(r0, MemOperand(r0, current_character())); __ ldrb(r0, MemOperand(r0, current_character()));
__ cmp(r0, Operand::Zero()); __ cmp(r0, Operand::Zero());
BranchOrBacktrack(ne, on_no_match); BranchOrBacktrack(ne, on_no_match);
if (mode_ != ASCII) { if (mode_ != LATIN1) {
__ bind(&done); __ bind(&done);
} }
return true; return true;
@ -1067,7 +1067,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString)); Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string. // Current string.
bool is_ascii = subject->IsOneByteRepresentationUnderneath(); bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address); DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <= DCHECK(*return_address <=
@ -1098,8 +1098,8 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
} }
// String might have changed. // String might have changed.
if (subject_tmp->IsOneByteRepresentation() != is_ascii) { if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
// If we changed between an ASCII and an UC16 string, the specialized // If we changed between an Latin1 and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from // code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code). // scratch (including, potentially, compiling a new version of the code).
return RETRY; return RETRY;
@ -1249,7 +1249,7 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
DCHECK(characters == 1); DCHECK(characters == 1);
} }
if (mode_ == ASCII) { if (mode_ == LATIN1) {
if (characters == 4) { if (characters == 4) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset)); __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else if (characters == 2) { } else if (characters == 2) {

View File

@ -190,7 +190,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
MacroAssembler* masm_; MacroAssembler* masm_;
// Which mode to generate code for (ASCII or UC16). // Which mode to generate code for (Latin1 or UC16).
Mode mode_; Mode mode_;
// One greater than maximal register index actually used. // One greater than maximal register index actually used.

View File

@ -311,7 +311,7 @@ void ArmDebugger::Debug() {
} }
for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) { for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
dvalue = GetVFPDoubleRegisterValue(i); dvalue = GetVFPDoubleRegisterValue(i);
uint64_t as_words = BitCast<uint64_t>(dvalue); uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n", PrintF("%3s: %f 0x%08x %08x\n",
VFPRegisters::Name(i, true), VFPRegisters::Name(i, true),
dvalue, dvalue,
@ -322,10 +322,10 @@ void ArmDebugger::Debug() {
if (GetValue(arg1, &value)) { if (GetValue(arg1, &value)) {
PrintF("%s: 0x%08x %d \n", arg1, value, value); PrintF("%s: 0x%08x %d \n", arg1, value, value);
} else if (GetVFPSingleValue(arg1, &svalue)) { } else if (GetVFPSingleValue(arg1, &svalue)) {
uint32_t as_word = BitCast<uint32_t>(svalue); uint32_t as_word = bit_cast<uint32_t>(svalue);
PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word); PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
} else if (GetVFPDoubleValue(arg1, &dvalue)) { } else if (GetVFPDoubleValue(arg1, &dvalue)) {
uint64_t as_words = BitCast<uint64_t>(dvalue); uint64_t as_words = bit_cast<uint64_t>(dvalue);
PrintF("%s: %f 0x%08x %08x\n", PrintF("%s: %f 0x%08x %08x\n",
arg1, arg1,
dvalue, dvalue,

View File

@ -457,7 +457,7 @@ MemOperand::MemOperand()
} }
MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode) MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
: base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode), : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) { shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
DCHECK(base.Is64Bits() && !base.IsZero()); DCHECK(base.Is64Bits() && !base.IsZero());

View File

@ -33,6 +33,7 @@
#define ARM64_DEFINE_REG_STATICS #define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64-inl.h" #include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
#include "src/base/cpu.h" #include "src/base/cpu.h"
namespace v8 { namespace v8 {
@ -227,7 +228,7 @@ bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8}; const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
for (unsigned i = 0; i < ARRAY_SIZE(regs); i++) { for (unsigned i = 0; i < arraysize(regs); i++) {
if (regs[i].IsRegister()) { if (regs[i].IsRegister()) {
number_of_valid_regs++; number_of_valid_regs++;
unique_regs |= regs[i].Bit(); unique_regs |= regs[i].Bit();
@ -601,7 +602,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) { void Assembler::Align(int m) {
DCHECK(m >= 4 && IsPowerOf2(m)); DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
while ((pc_offset() & (m - 1)) != 0) { while ((pc_offset() & (m - 1)) != 0) {
nop(); nop();
} }
@ -2208,6 +2209,17 @@ void Assembler::brk(int code) {
} }
void Assembler::EmitStringData(const char* string) {
size_t len = strlen(string) + 1;
DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
EmitData(string, len);
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
STATIC_ASSERT(sizeof(pad) == kInstructionSize);
EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
}
void Assembler::debug(const char* message, uint32_t code, Instr params) { void Assembler::debug(const char* message, uint32_t code, Instr params) {
#ifdef USE_SIMULATOR #ifdef USE_SIMULATOR
// Don't generate simulator specific code if we are building a snapshot, which // Don't generate simulator specific code if we are building a snapshot, which
@ -2443,7 +2455,7 @@ void Assembler::LoadStore(const CPURegister& rt,
const MemOperand& addr, const MemOperand& addr,
LoadStoreOp op) { LoadStoreOp op) {
Instr memop = op | Rt(rt) | RnSP(addr.base()); Instr memop = op | Rt(rt) | RnSP(addr.base());
ptrdiff_t offset = addr.offset(); int64_t offset = addr.offset();
if (addr.IsImmediateOffset()) { if (addr.IsImmediateOffset()) {
LSDataSize size = CalcLSDataSize(op); LSDataSize size = CalcLSDataSize(op);
@ -2492,18 +2504,18 @@ void Assembler::LoadStore(const CPURegister& rt,
} }
bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) { bool Assembler::IsImmLSUnscaled(int64_t offset) {
return is_int9(offset); return is_int9(offset);
} }
bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) { bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
bool offset_is_size_multiple = (((offset >> size) << size) == offset); bool offset_is_size_multiple = (((offset >> size) << size) == offset);
return offset_is_size_multiple && is_uint12(offset >> size); return offset_is_size_multiple && is_uint12(offset >> size);
} }
bool Assembler::IsImmLSPair(ptrdiff_t offset, LSDataSize size) { bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
bool offset_is_size_multiple = (((offset >> size) << size) == offset); bool offset_is_size_multiple = (((offset >> size) << size) == offset);
return offset_is_size_multiple && is_int7(offset >> size); return offset_is_size_multiple && is_int7(offset >> size);
} }
@ -2664,7 +2676,7 @@ bool Assembler::IsImmLogical(uint64_t value,
int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57; int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
// Ensure that the index to the multipliers array is within bounds. // Ensure that the index to the multipliers array is within bounds.
DCHECK((multiplier_idx >= 0) && DCHECK((multiplier_idx >= 0) &&
(static_cast<size_t>(multiplier_idx) < ARRAY_SIZE(multipliers))); (static_cast<size_t>(multiplier_idx) < arraysize(multipliers)));
uint64_t multiplier = multipliers[multiplier_idx]; uint64_t multiplier = multipliers[multiplier_idx];
uint64_t candidate = (b - a) * multiplier; uint64_t candidate = (b - a) * multiplier;
@ -3091,7 +3103,7 @@ void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
} }
void PatchingAssembler::PatchAdrFar(ptrdiff_t target_offset) { void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
// The code at the current instruction should be: // The code at the current instruction should be:
// adr rd, 0 // adr rd, 0
// nop (adr_far) // nop (adr_far)

View File

@ -276,6 +276,11 @@ struct FPRegister : public CPURegister {
(kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1); (kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1);
static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; } static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
// TODO(turbofan): Proper float32 support.
static int NumAllocatableAliasedRegisters() {
return NumAllocatableRegisters();
}
// Return true if the register is one that crankshaft can allocate. // Return true if the register is one that crankshaft can allocate.
bool IsAllocatable() const { bool IsAllocatable() const {
return (Bit() & kAllocatableFPRegisters) != 0; return (Bit() & kAllocatableFPRegisters) != 0;
@ -699,7 +704,7 @@ class MemOperand {
public: public:
inline MemOperand(); inline MemOperand();
inline explicit MemOperand(Register base, inline explicit MemOperand(Register base,
ptrdiff_t offset = 0, int64_t offset = 0,
AddrMode addrmode = Offset); AddrMode addrmode = Offset);
inline explicit MemOperand(Register base, inline explicit MemOperand(Register base,
Register regoffset, Register regoffset,
@ -715,7 +720,7 @@ class MemOperand {
const Register& base() const { return base_; } const Register& base() const { return base_; }
const Register& regoffset() const { return regoffset_; } const Register& regoffset() const { return regoffset_; }
ptrdiff_t offset() const { return offset_; } int64_t offset() const { return offset_; }
AddrMode addrmode() const { return addrmode_; } AddrMode addrmode() const { return addrmode_; }
Shift shift() const { return shift_; } Shift shift() const { return shift_; }
Extend extend() const { return extend_; } Extend extend() const { return extend_; }
@ -742,7 +747,7 @@ class MemOperand {
private: private:
Register base_; Register base_;
Register regoffset_; Register regoffset_;
ptrdiff_t offset_; int64_t offset_;
AddrMode addrmode_; AddrMode addrmode_;
Shift shift_; Shift shift_;
Extend extend_; Extend extend_;
@ -1733,16 +1738,7 @@ class Assembler : public AssemblerBase {
// Copy a string into the instruction stream, including the terminating NULL // Copy a string into the instruction stream, including the terminating NULL
// character. The instruction pointer (pc_) is then aligned correctly for // character. The instruction pointer (pc_) is then aligned correctly for
// subsequent instructions. // subsequent instructions.
void EmitStringData(const char * string) { void EmitStringData(const char* string);
size_t len = strlen(string) + 1;
DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
EmitData(string, len);
// Pad with NULL characters until pc_ is aligned.
const char pad[] = {'\0', '\0', '\0', '\0'};
STATIC_ASSERT(sizeof(pad) == kInstructionSize);
byte* next_pc = AlignUp(pc_, kInstructionSize);
EmitData(&pad, next_pc - pc_);
}
// Pseudo-instructions ------------------------------------------------------ // Pseudo-instructions ------------------------------------------------------
@ -1859,6 +1855,9 @@ class Assembler : public AssemblerBase {
inline static Instr ImmBarrierType(int imm2); inline static Instr ImmBarrierType(int imm2);
inline static LSDataSize CalcLSDataSize(LoadStoreOp op); inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
static bool IsImmLSUnscaled(int64_t offset);
static bool IsImmLSScaled(int64_t offset, LSDataSize size);
// Move immediates encoding. // Move immediates encoding.
inline static Instr ImmMoveWide(uint64_t imm); inline static Instr ImmMoveWide(uint64_t imm);
inline static Instr ShiftMoveWide(int64_t shift); inline static Instr ShiftMoveWide(int64_t shift);
@ -1942,12 +1941,10 @@ class Assembler : public AssemblerBase {
void LoadStore(const CPURegister& rt, void LoadStore(const CPURegister& rt,
const MemOperand& addr, const MemOperand& addr,
LoadStoreOp op); LoadStoreOp op);
static bool IsImmLSUnscaled(ptrdiff_t offset);
static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
void LoadStorePair(const CPURegister& rt, const CPURegister& rt2, void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
const MemOperand& addr, LoadStorePairOp op); const MemOperand& addr, LoadStorePairOp op);
static bool IsImmLSPair(ptrdiff_t offset, LSDataSize size); static bool IsImmLSPair(int64_t offset, LSDataSize size);
void Logical(const Register& rd, void Logical(const Register& rd,
const Register& rn, const Register& rn,
@ -2292,7 +2289,7 @@ class PatchingAssembler : public Assembler {
// See definition of PatchAdrFar() for details. // See definition of PatchAdrFar() for details.
static const int kAdrFarPatchableNNops = 2; static const int kAdrFarPatchableNNops = 2;
static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2; static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
void PatchAdrFar(ptrdiff_t target_offset); void PatchAdrFar(int64_t target_offset);
}; };

View File

@ -10,8 +10,7 @@
#include "src/debug.h" #include "src/debug.h"
#include "src/deoptimizer.h" #include "src/deoptimizer.h"
#include "src/full-codegen.h" #include "src/full-codegen.h"
#include "src/runtime.h" #include "src/runtime/runtime.h"
#include "src/stub-cache.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -781,8 +780,8 @@ void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
} }
void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) { void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
CallRuntimePassFunction(masm, Runtime::kCompileUnoptimized); CallRuntimePassFunction(masm, Runtime::kCompileLazy);
GenerateTailCallToReturnedCode(masm); GenerateTailCallToReturnedCode(masm);
} }

File diff suppressed because it is too large Load Diff

View File

@ -5,8 +5,6 @@
#ifndef V8_ARM64_CODE_STUBS_ARM64_H_ #ifndef V8_ARM64_CODE_STUBS_ARM64_H_
#define V8_ARM64_CODE_STUBS_ARM64_H_ #define V8_ARM64_CODE_STUBS_ARM64_H_
#include "src/ic-inl.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -14,42 +12,25 @@ namespace internal {
void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code); void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
: PlatformCodeStub(isolate), save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
private:
SaveFPRegsMode save_doubles_;
Major MajorKey() const { return StoreBufferOverflow; }
int MinorKey() const { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
};
class StringHelper : public AllStatic { class StringHelper : public AllStatic {
public: public:
// TODO(all): These don't seem to be used any more. Delete them. // Compares two flat one-byte strings and returns result in x0.
static void GenerateCompareFlatOneByteStrings(
MacroAssembler* masm, Register left, Register right, Register scratch1,
Register scratch2, Register scratch3, Register scratch4);
// Generate string hash. // Compare two flat one-byte strings for equality and returns result in x0.
static void GenerateHashInit(MacroAssembler* masm, static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
Register hash, Register left, Register right,
Register character); Register scratch1,
Register scratch2,
static void GenerateHashAddCharacter(MacroAssembler* masm, Register scratch3);
Register hash,
Register character);
static void GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch);
private: private:
static void GenerateOneByteCharsCompareLoop(
MacroAssembler* masm, Register left, Register right, Register length,
Register scratch1, Register scratch2, Label* chars_not_equal);
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
}; };
@ -60,12 +41,12 @@ class StoreRegistersStateStub: public PlatformCodeStub {
: PlatformCodeStub(isolate) {} : PlatformCodeStub(isolate) {}
static Register to_be_pushed_lr() { return ip0; } static Register to_be_pushed_lr() { return ip0; }
static void GenerateAheadOfTime(Isolate* isolate);
private:
Major MajorKey() const { return StoreRegistersState; }
int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm); static void GenerateAheadOfTime(Isolate* isolate);
private:
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
}; };
@ -75,11 +56,10 @@ class RestoreRegistersStateStub: public PlatformCodeStub {
: PlatformCodeStub(isolate) {} : PlatformCodeStub(isolate) {}
static void GenerateAheadOfTime(Isolate* isolate); static void GenerateAheadOfTime(Isolate* isolate);
private:
Major MajorKey() const { return RestoreRegistersState; }
int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm); private:
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
}; };
@ -95,16 +75,22 @@ class RecordWriteStub: public PlatformCodeStub {
RememberedSetAction remembered_set_action, RememberedSetAction remembered_set_action,
SaveFPRegsMode fp_mode) SaveFPRegsMode fp_mode)
: PlatformCodeStub(isolate), : PlatformCodeStub(isolate),
object_(object),
value_(value),
address_(address),
remembered_set_action_(remembered_set_action),
save_fp_regs_mode_(fp_mode),
regs_(object, // An input reg. regs_(object, // An input reg.
address, // An input reg. address, // An input reg.
value) { // One scratch reg. value) { // One scratch reg.
DCHECK(object.Is64Bits());
DCHECK(value.Is64Bits());
DCHECK(address.Is64Bits());
minor_key_ = ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(remembered_set_action) |
SaveFPRegsModeBits::encode(fp_mode);
} }
RecordWriteStub(uint32_t key, Isolate* isolate)
: PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
enum Mode { enum Mode {
STORE_BUFFER_ONLY, STORE_BUFFER_ONLY,
INCREMENTAL, INCREMENTAL,
@ -176,6 +162,8 @@ class RecordWriteStub: public PlatformCodeStub {
DCHECK(GetMode(stub) == mode); DCHECK(GetMode(stub) == mode);
} }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
private: private:
// This is a helper class to manage the registers associated with the stub. // This is a helper class to manage the registers associated with the stub.
// The 'object' and 'address' registers must be preserved. // The 'object' and 'address' registers must be preserved.
@ -282,62 +270,51 @@ class RecordWriteStub: public PlatformCodeStub {
friend class RecordWriteStub; friend class RecordWriteStub;
}; };
// A list of stub variants which are pregenerated.
// The variants are stored in the same format as the minor key, so
// MinorKeyFor() can be used to populate and check this list.
static const int kAheadOfTime[];
void Generate(MacroAssembler* masm);
void GenerateIncremental(MacroAssembler* masm, Mode mode);
enum OnNoNeedToInformIncrementalMarker { enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker, kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
}; };
virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
virtual void Generate(MacroAssembler* masm) OVERRIDE;
void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker( void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm, MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need, OnNoNeedToInformIncrementalMarker on_no_need,
Mode mode); Mode mode);
void InformIncrementalMarker(MacroAssembler* masm); void InformIncrementalMarker(MacroAssembler* masm);
Major MajorKey() const { return RecordWrite; }
int MinorKey() const {
return MinorKeyFor(object_, value_, address_, remembered_set_action_,
save_fp_regs_mode_);
}
static int MinorKeyFor(Register object,
Register value,
Register address,
RememberedSetAction action,
SaveFPRegsMode fp_mode) {
DCHECK(object.Is64Bits());
DCHECK(value.Is64Bits());
DCHECK(address.Is64Bits());
return ObjectBits::encode(object.code()) |
ValueBits::encode(value.code()) |
AddressBits::encode(address.code()) |
RememberedSetActionBits::encode(action) |
SaveFPRegsModeBits::encode(fp_mode);
}
void Activate(Code* code) { void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code); code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
} }
Register object() const {
return Register::from_code(ObjectBits::decode(minor_key_));
}
Register value() const {
return Register::from_code(ValueBits::decode(minor_key_));
}
Register address() const {
return Register::from_code(AddressBits::decode(minor_key_));
}
RememberedSetAction remembered_set_action() const {
return RememberedSetActionBits::decode(minor_key_);
}
SaveFPRegsMode save_fp_regs_mode() const {
return SaveFPRegsModeBits::decode(minor_key_);
}
class ObjectBits: public BitField<int, 0, 5> {}; class ObjectBits: public BitField<int, 0, 5> {};
class ValueBits: public BitField<int, 5, 5> {}; class ValueBits: public BitField<int, 5, 5> {};
class AddressBits: public BitField<int, 10, 5> {}; class AddressBits: public BitField<int, 10, 5> {};
class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {}; class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {}; class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
Register object_;
Register value_;
Register address_;
RememberedSetAction remembered_set_action_;
SaveFPRegsMode save_fp_regs_mode_;
Label slow_; Label slow_;
RegisterAllocation regs_; RegisterAllocation regs_;
}; };
@ -348,14 +325,13 @@ class RecordWriteStub: public PlatformCodeStub {
class DirectCEntryStub: public PlatformCodeStub { class DirectCEntryStub: public PlatformCodeStub {
public: public:
explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {} explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
void Generate(MacroAssembler* masm);
void GenerateCall(MacroAssembler* masm, Register target); void GenerateCall(MacroAssembler* masm, Register target);
private: private:
Major MajorKey() const { return DirectCEntry; }
int MinorKey() const { return 0; }
bool NeedsImmovableCode() { return true; } bool NeedsImmovableCode() { return true; }
DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
}; };
@ -364,9 +340,9 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
NameDictionaryLookupStub(Isolate* isolate, LookupMode mode) NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
: PlatformCodeStub(isolate), mode_(mode) { } : PlatformCodeStub(isolate) {
minor_key_ = LookupModeBits::encode(mode);
void Generate(MacroAssembler* masm); }
static void GenerateNegativeLookup(MacroAssembler* masm, static void GenerateNegativeLookup(MacroAssembler* masm,
Label* miss, Label* miss,
@ -398,78 +374,14 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
NameDictionary::kHeaderSize + NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize; NameDictionary::kElementsStartIndex * kPointerSize;
Major MajorKey() const { return NameDictionaryLookup; } LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
int MinorKey() const { return LookupModeBits::encode(mode_); }
class LookupModeBits: public BitField<LookupMode, 0, 1> {}; class LookupModeBits: public BitField<LookupMode, 0, 1> {};
LookupMode mode_; DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
}; };
class SubStringStub: public PlatformCodeStub {
public:
explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
private:
Major MajorKey() const { return SubString; }
int MinorKey() const { return 0; }
void Generate(MacroAssembler* masm);
};
class StringCompareStub: public PlatformCodeStub {
public:
explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
// Compares two flat ASCII strings and returns result in x0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4);
// Compare two flat ASCII strings for equality and returns result
// in x0.
static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3);
private:
virtual Major MajorKey() const { return StringCompare; }
virtual int MinorKey() const { return 0; }
virtual void Generate(MacroAssembler* masm);
static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
Register left,
Register right,
Register length,
Register scratch1,
Register scratch2,
Label* chars_not_equal);
};
class PlatformInterfaceDescriptor {
public:
explicit PlatformInterfaceDescriptor(
TargetAddressStorageMode storage_mode)
: storage_mode_(storage_mode) { }
TargetAddressStorageMode storage_mode() { return storage_mode_; }
private:
TargetAddressStorageMode storage_mode_;
};
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_ARM64_CODE_STUBS_ARM64_H_ #endif // V8_ARM64_CODE_STUBS_ARM64_H_

View File

@ -485,15 +485,15 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ B(ne, call_runtime); __ B(ne, call_runtime);
__ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset)); __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label ascii, done; Label one_byte, done;
__ Bind(&check_encoding); __ Bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0); STATIC_ASSERT(kTwoByteStringTag == 0);
__ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii); __ TestAndBranchIfAnySet(result, kStringEncodingMask, &one_byte);
// Two-byte string. // Two-byte string.
__ Ldrh(result, MemOperand(string, index, SXTW, 1)); __ Ldrh(result, MemOperand(string, index, SXTW, 1));
__ B(&done); __ B(&done);
__ Bind(&ascii); __ Bind(&one_byte);
// Ascii string. // One-byte string.
__ Ldrb(result, MemOperand(string, index, SXTW)); __ Ldrb(result, MemOperand(string, index, SXTW));
__ Bind(&done); __ Bind(&done);
} }

View File

@ -6,7 +6,7 @@
#define V8_ARM64_CODEGEN_ARM64_H_ #define V8_ARM64_CODEGEN_ARM64_H_
#include "src/ast.h" #include "src/ast.h"
#include "src/ic-inl.h" #include "src/macro-assembler.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {

View File

@ -236,17 +236,17 @@ void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) { void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC load (from ic-arm.cc). // Calling convention for IC load (from ic-arm.cc).
Register receiver = LoadIC::ReceiverRegister(); Register receiver = LoadDescriptor::ReceiverRegister();
Register name = LoadIC::NameRegister(); Register name = LoadDescriptor::NameRegister();
Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10); Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10);
} }
void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) { void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC store (from ic-arm64.cc). // Calling convention for IC store (from ic-arm64.cc).
Register receiver = StoreIC::ReceiverRegister(); Register receiver = StoreDescriptor::ReceiverRegister();
Register name = StoreIC::NameRegister(); Register name = StoreDescriptor::NameRegister();
Register value = StoreIC::ValueRegister(); Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper( Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10); masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
} }
@ -260,9 +260,9 @@ void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) { void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
// Calling convention for IC keyed store call (from ic-arm64.cc). // Calling convention for IC keyed store call (from ic-arm64.cc).
Register receiver = KeyedStoreIC::ReceiverRegister(); Register receiver = StoreDescriptor::ReceiverRegister();
Register name = KeyedStoreIC::NameRegister(); Register name = StoreDescriptor::NameRegister();
Register value = KeyedStoreIC::ValueRegister(); Register value = StoreDescriptor::ValueRegister();
Generate_DebugBreakCallHelper( Generate_DebugBreakCallHelper(
masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10); masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
} }

View File

@ -17,13 +17,13 @@ namespace internal {
void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) { void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor); visitors_.remove(new_visitor);
visitors_.push_front(new_visitor); visitors_.push_back(new_visitor);
} }
void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) { void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) {
visitors_.remove(new_visitor); visitors_.remove(new_visitor);
visitors_.push_back(new_visitor); visitors_.push_front(new_visitor);
} }

View File

@ -89,7 +89,7 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
void Deoptimizer::SetPlatformCompiledStubRegisters( void Deoptimizer::SetPlatformCompiledStubRegisters(
FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
ApiFunction function(descriptor->deoptimization_handler()); ApiFunction function(descriptor->deoptimization_handler());
ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); intptr_t handler = reinterpret_cast<intptr_t>(xref.address());

View File

@ -1517,7 +1517,9 @@ int Disassembler::SubstituteLiteralField(Instruction* instr,
case LDR_w_lit: case LDR_w_lit:
case LDR_x_lit: case LDR_x_lit:
case LDR_s_lit: case LDR_s_lit:
case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break; case LDR_d_lit:
AppendToOutput("(addr 0x%016" PRIxPTR ")", instr->LiteralAddress());
break;
default: UNREACHABLE(); default: UNREACHABLE();
} }

View File

@ -6,15 +6,16 @@
#if V8_TARGET_ARCH_ARM64 #if V8_TARGET_ARCH_ARM64
#include "src/code-factory.h"
#include "src/code-stubs.h" #include "src/code-stubs.h"
#include "src/codegen.h" #include "src/codegen.h"
#include "src/compiler.h" #include "src/compiler.h"
#include "src/debug.h" #include "src/debug.h"
#include "src/full-codegen.h" #include "src/full-codegen.h"
#include "src/ic/ic.h"
#include "src/isolate-inl.h" #include "src/isolate-inl.h"
#include "src/parser.h" #include "src/parser.h"
#include "src/scopes.h" #include "src/scopes.h"
#include "src/stub-cache.h"
#include "src/arm64/code-stubs-arm64.h" #include "src/arm64/code-stubs-arm64.h"
#include "src/arm64/macro-assembler-arm64.h" #include "src/arm64/macro-assembler-arm64.h"
@ -1051,7 +1052,8 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
// Record position before stub call for type feedback. // Record position before stub call for type feedback.
SetSourcePosition(clause->position()); SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT); Handle<Code> ic =
CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
CallIC(ic, clause->CompareId()); CallIC(ic, clause->CompareId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
@ -1178,7 +1180,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Bind(&fixed_array); __ Bind(&fixed_array);
__ LoadObject(x1, FeedbackVector()); __ LoadObject(x1, FeedbackVector());
__ Mov(x10, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot))); __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
__ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check. __ Mov(x1, Smi::FromInt(1)); // Smi indicates slow check.
@ -1319,9 +1321,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure && !pretenure &&
scope()->is_function_scope() && scope()->is_function_scope() &&
info->num_literals() == 0) { info->num_literals() == 0) {
FastNewClosureStub stub(isolate(), FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
info->strict_mode(),
info->is_generator());
__ Mov(x2, Operand(info)); __ Mov(x2, Operand(info));
__ CallStub(&stub); __ CallStub(&stub);
} else { } else {
@ -1341,6 +1341,26 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
} }
void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
Comment cnmt(masm_, "[ SuperReference ");
__ ldr(LoadDescriptor::ReceiverRegister(),
MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
__ Mov(LoadDescriptor::NameRegister(), Operand(home_object_symbol));
CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
__ Mov(x10, Operand(isolate()->factory()->undefined_value()));
__ cmp(x0, x10);
Label done;
__ b(&done, ne);
__ CallRuntime(Runtime::kThrowNonMethodError, 0);
__ bind(&done);
}
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy, void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state, TypeofState typeof_state,
Label* slow) { Label* slow) {
@ -1384,10 +1404,10 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ Bind(&fast); __ Bind(&fast);
} }
__ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand()); __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
__ Mov(LoadIC::NameRegister(), Operand(proxy->var()->name())); __ Mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ Mov(LoadIC::SlotRegister(), __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot())); Smi::FromInt(proxy->VariableFeedbackSlot()));
} }
@ -1469,10 +1489,10 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) { switch (var->location()) {
case Variable::UNALLOCATED: { case Variable::UNALLOCATED: {
Comment cmnt(masm_, "Global variable"); Comment cmnt(masm_, "Global variable");
__ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand()); __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
__ Mov(LoadIC::NameRegister(), Operand(var->name())); __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ Mov(LoadIC::SlotRegister(), __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot())); Smi::FromInt(proxy->VariableFeedbackSlot()));
} }
CallLoadIC(CONTEXTUAL); CallLoadIC(CONTEXTUAL);
@ -1682,9 +1702,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) { if (key->value()->IsInternalizedString()) {
if (property->emit_store()) { if (property->emit_store()) {
VisitForAccumulatorValue(value); VisitForAccumulatorValue(value);
DCHECK(StoreIC::ValueRegister().is(x0)); DCHECK(StoreDescriptor::ValueRegister().is(x0));
__ Mov(StoreIC::NameRegister(), Operand(key->value())); __ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ Peek(StoreIC::ReceiverRegister(), 0); __ Peek(StoreDescriptor::ReceiverRegister(), 0);
CallStoreIC(key->LiteralFeedbackId()); CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS); PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else { } else {
@ -1844,12 +1864,18 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// Left-hand side can only be a property, a global or a (parameter or local) // Left-hand side can only be a property, a global or a (parameter or local)
// slot. // slot.
enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY }; enum LhsKind {
VARIABLE,
NAMED_PROPERTY,
KEYED_PROPERTY,
NAMED_SUPER_PROPERTY
};
LhsKind assign_type = VARIABLE; LhsKind assign_type = VARIABLE;
Property* property = expr->target()->AsProperty(); Property* property = expr->target()->AsProperty();
if (property != NULL) { if (property != NULL) {
assign_type = (property->key()->IsPropertyName()) assign_type = (property->key()->IsPropertyName())
? NAMED_PROPERTY ? (property->IsSuperAccess() ? NAMED_SUPER_PROPERTY
: NAMED_PROPERTY)
: KEYED_PROPERTY; : KEYED_PROPERTY;
} }
@ -1862,17 +1888,27 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) { if (expr->is_compound()) {
// We need the receiver both on the stack and in the register. // We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
__ Peek(LoadIC::ReceiverRegister(), 0); __ Peek(LoadDescriptor::ReceiverRegister(), 0);
} else { } else {
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
} }
break; break;
case NAMED_SUPER_PROPERTY:
VisitForStackValue(property->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(property->obj()->AsSuperReference());
__ Push(result_register());
if (expr->is_compound()) {
const Register scratch = x10;
__ Peek(scratch, kPointerSize);
__ Push(scratch, result_register());
}
break;
case KEYED_PROPERTY: case KEYED_PROPERTY:
if (expr->is_compound()) { if (expr->is_compound()) {
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
VisitForStackValue(property->key()); VisitForStackValue(property->key());
__ Peek(LoadIC::ReceiverRegister(), 1 * kPointerSize); __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
__ Peek(LoadIC::NameRegister(), 0); __ Peek(LoadDescriptor::NameRegister(), 0);
} else { } else {
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
VisitForStackValue(property->key()); VisitForStackValue(property->key());
@ -1893,6 +1929,10 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyLoad(property); EmitNamedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG); PrepareForBailoutForId(property->LoadId(), TOS_REG);
break; break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG);
break;
case KEYED_PROPERTY: case KEYED_PROPERTY:
EmitKeyedPropertyLoad(property); EmitKeyedPropertyLoad(property);
PrepareForBailoutForId(property->LoadId(), TOS_REG); PrepareForBailoutForId(property->LoadId(), TOS_REG);
@ -1939,6 +1979,9 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
case NAMED_PROPERTY: case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr); EmitNamedPropertyAssignment(expr);
break; break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyAssignment(expr);
break;
case KEYED_PROPERTY: case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr); EmitKeyedPropertyAssignment(expr);
break; break;
@ -1949,9 +1992,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position()); SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral(); Literal* key = prop->key()->AsLiteral();
__ Mov(LoadIC::NameRegister(), Operand(key->value())); DCHECK(!prop->IsSuperAccess());
__ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ Mov(LoadIC::SlotRegister(), __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(prop->PropertyFeedbackSlot())); Smi::FromInt(prop->PropertyFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL); CallLoadIC(NOT_CONTEXTUAL);
} else { } else {
@ -1960,12 +2005,24 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
} }
void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
// Stack: receiver, home_object.
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
DCHECK(prop->IsSuperAccess());
__ Push(key->value());
__ CallRuntime(Runtime::kLoadFromSuper, 3);
}
void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) { void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
SetSourcePosition(prop->position()); SetSourcePosition(prop->position());
// Call keyed load IC. It has arguments key and receiver in r0 and r1. // Call keyed load IC. It has arguments key and receiver in r0 and r1.
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ Mov(LoadIC::SlotRegister(), __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(prop->PropertyFeedbackSlot())); Smi::FromInt(prop->PropertyFeedbackSlot()));
CallIC(ic); CallIC(ic);
} else { } else {
@ -1993,10 +2050,11 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
patch_site.EmitJumpIfSmi(x10, &both_smis); patch_site.EmitJumpIfSmi(x10, &both_smis);
__ Bind(&stub_call); __ Bind(&stub_call);
BinaryOpICStub stub(isolate(), op, mode);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
{ {
Assembler::BlockPoolsScope scope(masm_); Assembler::BlockPoolsScope scope(masm_);
CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
} }
__ B(&done); __ B(&done);
@ -2019,16 +2077,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ Ubfx(right, right, kSmiShift, 5); __ Ubfx(right, right, kSmiShift, 5);
__ Lsl(result, left, right); __ Lsl(result, left, right);
break; break;
case Token::SHR: { case Token::SHR:
Label right_not_zero; // If `left >>> right` >= 0x80000000, the result is not representable in a
__ Cbnz(right, &right_not_zero); // signed 32-bit smi.
__ Tbnz(left, kXSignBit, &stub_call);
__ Bind(&right_not_zero);
__ Ubfx(right, right, kSmiShift, 5); __ Ubfx(right, right, kSmiShift, 5);
__ Lsr(result, left, right); __ Lsr(x10, left, right);
__ Bic(result, result, kSmiShiftMask); __ Tbnz(x10, kXSignBit, &stub_call);
__ Bic(result, x10, kSmiShiftMask);
break; break;
}
case Token::ADD: case Token::ADD:
__ Adds(x10, left, right); __ Adds(x10, left, right);
__ B(vs, &stub_call); __ B(vs, &stub_call);
@ -2079,11 +2135,11 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
Token::Value op, Token::Value op,
OverwriteMode mode) { OverwriteMode mode) {
__ Pop(x1); __ Pop(x1);
BinaryOpICStub stub(isolate(), op, mode); Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code. JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code.
{ {
Assembler::BlockPoolsScope scope(masm_); Assembler::BlockPoolsScope scope(masm_);
CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId()); CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
} }
context()->Plug(x0); context()->Plug(x0);
@ -2116,9 +2172,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
VisitForAccumulatorValue(prop->obj()); VisitForAccumulatorValue(prop->obj());
// TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
// this copy. // this copy.
__ Mov(StoreIC::ReceiverRegister(), x0); __ Mov(StoreDescriptor::ReceiverRegister(), x0);
__ Pop(StoreIC::ValueRegister()); // Restore value. __ Pop(StoreDescriptor::ValueRegister()); // Restore value.
__ Mov(StoreIC::NameRegister(), __ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value())); Operand(prop->key()->AsLiteral()->value()));
CallStoreIC(); CallStoreIC();
break; break;
@ -2127,11 +2183,11 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
__ Push(x0); // Preserve value. __ Push(x0); // Preserve value.
VisitForStackValue(prop->obj()); VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key()); VisitForAccumulatorValue(prop->key());
__ Mov(KeyedStoreIC::NameRegister(), x0); __ Mov(StoreDescriptor::NameRegister(), x0);
__ Pop(KeyedStoreIC::ReceiverRegister(), KeyedStoreIC::ValueRegister()); __ Pop(StoreDescriptor::ReceiverRegister(),
Handle<Code> ic = strict_mode() == SLOPPY StoreDescriptor::ValueRegister());
? isolate()->builtins()->KeyedStoreIC_Initialize() Handle<Code> ic =
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic); CallIC(ic);
break; break;
} }
@ -2158,8 +2214,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment"); ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
if (var->IsUnallocated()) { if (var->IsUnallocated()) {
// Global var, const, or let. // Global var, const, or let.
__ Mov(StoreIC::NameRegister(), Operand(var->name())); __ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
__ Ldr(StoreIC::ReceiverRegister(), GlobalObjectMemOperand()); __ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
CallStoreIC(); CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) { } else if (op == Token::INIT_CONST_LEGACY) {
@ -2231,8 +2287,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call. // Record source code position before IC call.
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
__ Mov(StoreIC::NameRegister(), Operand(prop->key()->AsLiteral()->value())); __ Mov(StoreDescriptor::NameRegister(),
__ Pop(StoreIC::ReceiverRegister()); Operand(prop->key()->AsLiteral()->value()));
__ Pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->AssignmentFeedbackId()); CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@ -2240,6 +2297,24 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
} }
void FullCodeGenerator::EmitNamedSuperPropertyAssignment(Assignment* expr) {
// Assignment to named property of super.
// x0 : value
// stack : receiver ('this'), home_object
Property* prop = expr->target()->AsProperty();
DCHECK(prop != NULL);
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
__ Push(x0);
__ Push(key->value());
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy),
4);
context()->Plug(x0);
}
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment"); ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment");
// Assignment to a property, using a keyed store IC. // Assignment to a property, using a keyed store IC.
@ -2247,12 +2322,10 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call. // Record source code position before IC call.
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
// TODO(all): Could we pass this in registers rather than on the stack? // TODO(all): Could we pass this in registers rather than on the stack?
__ Pop(KeyedStoreIC::NameRegister(), KeyedStoreIC::ReceiverRegister()); __ Pop(StoreDescriptor::NameRegister(), StoreDescriptor::ReceiverRegister());
DCHECK(KeyedStoreIC::ValueRegister().is(x0)); DCHECK(StoreDescriptor::ValueRegister().is(x0));
Handle<Code> ic = strict_mode() == SLOPPY Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
? isolate()->builtins()->KeyedStoreIC_Initialize()
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic, expr->AssignmentFeedbackId()); CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@ -2265,16 +2338,23 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
Expression* key = expr->key(); Expression* key = expr->key();
if (key->IsPropertyName()) { if (key->IsPropertyName()) {
if (!expr->IsSuperAccess()) {
VisitForAccumulatorValue(expr->obj()); VisitForAccumulatorValue(expr->obj());
__ Move(LoadIC::ReceiverRegister(), x0); __ Move(LoadDescriptor::ReceiverRegister(), x0);
EmitNamedPropertyLoad(expr); EmitNamedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(expr->obj()->AsSuperReference());
__ Push(result_register());
EmitNamedSuperPropertyLoad(expr);
}
PrepareForBailoutForId(expr->LoadId(), TOS_REG); PrepareForBailoutForId(expr->LoadId(), TOS_REG);
context()->Plug(x0); context()->Plug(x0);
} else { } else {
VisitForStackValue(expr->obj()); VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key()); VisitForAccumulatorValue(expr->key());
__ Move(LoadIC::NameRegister(), x0); __ Move(LoadDescriptor::NameRegister(), x0);
__ Pop(LoadIC::ReceiverRegister()); __ Pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr); EmitKeyedPropertyLoad(expr);
context()->Plug(x0); context()->Plug(x0);
} }
@ -2294,12 +2374,11 @@ void FullCodeGenerator::CallIC(Handle<Code> code,
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression(); Expression* callee = expr->expression();
CallIC::CallType call_type = callee->IsVariableProxy() CallICState::CallType call_type =
? CallIC::FUNCTION callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
: CallIC::METHOD;
// Get the target function. // Get the target function.
if (call_type == CallIC::FUNCTION) { if (call_type == CallICState::FUNCTION) {
{ StackValueContext context(this); { StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy()); EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS); PrepareForBailout(callee, NO_REGISTERS);
@ -2310,7 +2389,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} else { } else {
// Load the function from the receiver. // Load the function from the receiver.
DCHECK(callee->IsProperty()); DCHECK(callee->IsProperty());
__ Peek(LoadIC::ReceiverRegister(), 0); DCHECK(!callee->AsProperty()->IsSuperAccess());
__ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(callee->AsProperty()); EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver. // Push the target function under the receiver.
@ -2322,6 +2402,45 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
} }
void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
Property* prop = callee->AsProperty();
DCHECK(prop->IsSuperAccess());
SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
const Register scratch = x10;
SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
EmitLoadHomeObject(super_ref);
__ Push(x0);
VisitForAccumulatorValue(super_ref->this_var());
__ Push(x0);
__ Peek(scratch, kPointerSize);
__ Push(x0, scratch);
__ Push(key->value());
// Stack here:
// - home_object
// - this (receiver)
// - this (receiver) <-- LoadFromSuper will pop here and below.
// - home_object
// - key
__ CallRuntime(Runtime::kLoadFromSuper, 3);
// Replace home_object with target function.
__ Poke(x0, kPointerSize);
// Stack here:
// - target function
// - this (receiver)
EmitCall(expr, CallICState::METHOD);
}
// Code common for calls using the IC. // Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
Expression* key) { Expression* key) {
@ -2332,8 +2451,8 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver. // Load the function from the receiver.
DCHECK(callee->IsProperty()); DCHECK(callee->IsProperty());
__ Peek(LoadIC::ReceiverRegister(), 0); __ Peek(LoadDescriptor::ReceiverRegister(), 0);
__ Move(LoadIC::NameRegister(), x0); __ Move(LoadDescriptor::NameRegister(), x0);
EmitKeyedPropertyLoad(callee->AsProperty()); EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@ -2341,11 +2460,11 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
__ Pop(x10); __ Pop(x10);
__ Push(x0, x10); __ Push(x0, x10);
EmitCall(expr, CallIC::METHOD); EmitCall(expr, CallICState::METHOD);
} }
void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) { void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments. // Load the arguments.
ZoneList<Expression*>* args = expr->arguments(); ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length(); int arg_count = args->length();
@ -2494,15 +2613,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
EmitCall(expr); EmitCall(expr);
} else if (call_type == Call::PROPERTY_CALL) { } else if (call_type == Call::PROPERTY_CALL) {
Property* property = callee->AsProperty(); Property* property = callee->AsProperty();
{ PreservePositionScope scope(masm()->positions_recorder()); bool is_named_call = property->key()->IsPropertyName();
// super.x() is handled in EmitCallWithLoadIC.
if (property->IsSuperAccess() && is_named_call) {
EmitSuperCallWithLoadIC(expr);
} else {
{
PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(property->obj()); VisitForStackValue(property->obj());
} }
if (property->key()->IsPropertyName()) { if (is_named_call) {
EmitCallWithLoadIC(expr); EmitCallWithLoadIC(expr);
} else { } else {
EmitKeyedCallWithLoadIC(expr, property->key()); EmitKeyedCallWithLoadIC(expr, property->key());
} }
}
} else { } else {
DCHECK(call_type == Call::OTHER_CALL); DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above. // Call to an arbitrary expression not handled specially above.
@ -2822,7 +2947,7 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
&if_true, &if_false, &fall_through); &if_true, &if_false, &fall_through);
// Only a HeapNumber can be -0.0, so return false if we have something else. // Only a HeapNumber can be -0.0, so return false if we have something else.
__ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK); __ JumpIfNotHeapNumber(x0, if_false, DO_SMI_CHECK);
// Test the bit pattern. // Test the bit pattern.
__ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset)); __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
@ -3015,7 +3140,7 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
// Functions have class 'Function'. // Functions have class 'Function'.
__ Bind(&function); __ Bind(&function);
__ LoadRoot(x0, Heap::kfunction_class_stringRootIndex); __ LoadRoot(x0, Heap::kFunction_stringRootIndex);
__ B(&done); __ B(&done);
// Objects with a non-function constructor have class 'Object'. // Objects with a non-function constructor have class 'Object'.
@ -3134,9 +3259,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
Register value = x2; Register value = x2;
Register scratch = x10; Register scratch = x10;
VisitForStackValue(args->at(1)); // index VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(2)); // value VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(0)); // string VisitForAccumulatorValue(args->at(2)); // string
__ Pop(value, index); __ Pop(value, index);
if (FLAG_debug_code) { if (FLAG_debug_code) {
@ -3164,9 +3289,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
Register value = x2; Register value = x2;
Register scratch = x10; Register scratch = x10;
VisitForStackValue(args->at(1)); // index VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(2)); // value VisitForStackValue(args->at(1)); // value
VisitForAccumulatorValue(args->at(0)); // string VisitForAccumulatorValue(args->at(2)); // string
__ Pop(value, index); __ Pop(value, index);
if (FLAG_debug_code) { if (FLAG_debug_code) {
@ -3507,8 +3632,8 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
} }
void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin"); ASM_LOCATION("FullCodeGenerator::EmitFastOneByteArrayJoin");
ZoneList<Expression*>* args = expr->arguments(); ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2); DCHECK(args->length() == 2);
@ -3560,7 +3685,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Get the FixedArray containing array's elements. // Get the FixedArray containing array's elements.
__ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset)); __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
// Check that all array elements are sequential ASCII strings, and // Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths. // accumulate the sum of their lengths.
__ Mov(string_length, 0); __ Mov(string_length, 0);
__ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag); __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
@ -3575,14 +3700,14 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// elements_end: Array end. // elements_end: Array end.
if (FLAG_debug_code) { if (FLAG_debug_code) {
__ Cmp(array_length, 0); __ Cmp(array_length, 0);
__ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin); __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
} }
__ Bind(&loop); __ Bind(&loop);
__ Ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ JumpIfSmi(string, &bailout); __ JumpIfSmi(string, &bailout);
__ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset)); __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
__ Ldrsw(scratch1, __ Ldrsw(scratch1,
UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset)); UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
__ Adds(string_length, string_length, scratch1); __ Adds(string_length, string_length, scratch1);
@ -3604,11 +3729,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// string_length: Sum of string lengths (not smi). // string_length: Sum of string lengths (not smi).
// elements: FixedArray of strings. // elements: FixedArray of strings.
// Check that the separator is a flat ASCII string. // Check that the separator is a flat one-byte string.
__ JumpIfSmi(separator, &bailout); __ JumpIfSmi(separator, &bailout);
__ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset)); __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
__ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
// Add (separator length times array_length) - separator length to the // Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. // string_length to get the length of the result string.
@ -3628,13 +3753,13 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// separator: Separator string // separator: Separator string
// string_length: Length of result string (not smi) // string_length: Length of result string (not smi)
// array_length: Length of the array (not smi). // array_length: Length of the array (not smi).
__ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3, __ AllocateOneByteString(result, string_length, scratch1, scratch2, scratch3,
&bailout); &bailout);
// Prepare for looping. Set up elements_end to end of the array. Set // Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first // result_pos to the position of the result where to write the first
// character. // character.
// TODO(all): useless unless AllocateAsciiString trashes the register. // TODO(all): useless unless AllocateOneByteString trashes the register.
__ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2)); __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
__ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag); __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
@ -3662,7 +3787,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case // One-character separator case
__ Bind(&one_char_separator); __ Bind(&one_char_separator);
// Replace separator with its ASCII character value. // Replace separator with its one-byte character value.
__ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize)); __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first // Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator // element is not preceded by a separator
@ -3673,7 +3798,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// result_pos: the position to which we are currently copying characters. // result_pos: the position to which we are currently copying characters.
// element: Current array element. // element: Current array element.
// elements_end: Array end. // elements_end: Array end.
// separator: Single separator ASCII char (in lower byte). // separator: Single separator one-byte char (in lower byte).
// Copy the separator character to the result. // Copy the separator character to the result.
__ Strb(separator, MemOperand(result_pos, 1, PostIndex)); __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
@ -3749,15 +3874,15 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) { if (expr->is_jsruntime()) {
// Push the builtins object as the receiver. // Push the builtins object as the receiver.
__ Ldr(x10, GlobalObjectMemOperand()); __ Ldr(x10, GlobalObjectMemOperand());
__ Ldr(LoadIC::ReceiverRegister(), __ Ldr(LoadDescriptor::ReceiverRegister(),
FieldMemOperand(x10, GlobalObject::kBuiltinsOffset)); FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
__ Push(LoadIC::ReceiverRegister()); __ Push(LoadDescriptor::ReceiverRegister());
// Load the function from the receiver. // Load the function from the receiver.
Handle<String> name = expr->name(); Handle<String> name = expr->name();
__ Mov(LoadIC::NameRegister(), Operand(name)); __ Mov(LoadDescriptor::NameRegister(), Operand(name));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ Mov(LoadIC::SlotRegister(), __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->CallRuntimeFeedbackSlot())); Smi::FromInt(expr->CallRuntimeFeedbackSlot()));
CallLoadIC(NOT_CONTEXTUAL); CallLoadIC(NOT_CONTEXTUAL);
} else { } else {
@ -3922,6 +4047,11 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (prop != NULL) { if (prop != NULL) {
assign_type = assign_type =
(prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY; (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
if (prop->IsSuperAccess()) {
// throw exception.
VisitSuperReference(prop->obj()->AsSuperReference());
return;
}
} }
// Evaluate expression and get value. // Evaluate expression and get value.
@ -3937,14 +4067,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
if (assign_type == NAMED_PROPERTY) { if (assign_type == NAMED_PROPERTY) {
// Put the object both on the stack and in the register. // Put the object both on the stack and in the register.
VisitForStackValue(prop->obj()); VisitForStackValue(prop->obj());
__ Peek(LoadIC::ReceiverRegister(), 0); __ Peek(LoadDescriptor::ReceiverRegister(), 0);
EmitNamedPropertyLoad(prop); EmitNamedPropertyLoad(prop);
} else { } else {
// KEYED_PROPERTY // KEYED_PROPERTY
VisitForStackValue(prop->obj()); VisitForStackValue(prop->obj());
VisitForStackValue(prop->key()); VisitForStackValue(prop->key());
__ Peek(LoadIC::ReceiverRegister(), 1 * kPointerSize); __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
__ Peek(LoadIC::NameRegister(), 0); __ Peek(LoadDescriptor::NameRegister(), 0);
EmitKeyedPropertyLoad(prop); EmitKeyedPropertyLoad(prop);
} }
} }
@ -4025,8 +4155,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
{ {
Assembler::BlockPoolsScope scope(masm_); Assembler::BlockPoolsScope scope(masm_);
BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE); Handle<Code> code =
CallIC(stub.GetCode(), expr->CountBinOpFeedbackId()); CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
CallIC(code, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
} }
__ Bind(&done); __ Bind(&done);
@ -4054,9 +4185,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} }
break; break;
case NAMED_PROPERTY: { case NAMED_PROPERTY: {
__ Mov(StoreIC::NameRegister(), __ Mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value())); Operand(prop->key()->AsLiteral()->value()));
__ Pop(StoreIC::ReceiverRegister()); __ Pop(StoreDescriptor::ReceiverRegister());
CallStoreIC(expr->CountStoreFeedbackId()); CallStoreIC(expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) { if (expr->is_postfix()) {
@ -4069,11 +4200,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
break; break;
} }
case KEYED_PROPERTY: { case KEYED_PROPERTY: {
__ Pop(KeyedStoreIC::NameRegister()); __ Pop(StoreDescriptor::NameRegister());
__ Pop(KeyedStoreIC::ReceiverRegister()); __ Pop(StoreDescriptor::ReceiverRegister());
Handle<Code> ic = strict_mode() == SLOPPY Handle<Code> ic =
? isolate()->builtins()->KeyedStoreIC_Initialize() CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
CallIC(ic, expr->CountStoreFeedbackId()); CallIC(ic, expr->CountStoreFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) { if (expr->is_postfix()) {
@ -4095,10 +4225,10 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy(); VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) { if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "Global variable"); Comment cmnt(masm_, "Global variable");
__ Ldr(LoadIC::ReceiverRegister(), GlobalObjectMemOperand()); __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
__ Mov(LoadIC::NameRegister(), Operand(proxy->name())); __ Mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ Mov(LoadIC::SlotRegister(), __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(proxy->VariableFeedbackSlot())); Smi::FromInt(proxy->VariableFeedbackSlot()));
} }
// Use a regular load, not a contextual load, to avoid a reference // Use a regular load, not a contextual load, to avoid a reference
@ -4271,7 +4401,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
// Record position and call the compare IC. // Record position and call the compare IC.
SetSourcePosition(expr->position()); SetSourcePosition(expr->position());
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
CallIC(ic, expr->CompareOperationFeedbackId()); CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo(); patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@ -4332,12 +4462,12 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// don't want to spend too much time on it now. // don't want to spend too much time on it now.
switch (expr->yield_kind()) { switch (expr->yield_kind()) {
case Yield::SUSPEND: case Yield::kSuspend:
// Pop value from top-of-stack slot; box result into result register. // Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false); EmitCreateIteratorResult(false);
__ Push(result_register()); __ Push(result_register());
// Fall through. // Fall through.
case Yield::INITIAL: { case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume; Label suspend, continuation, post_runtime, resume;
__ B(&suspend); __ B(&suspend);
@ -4372,7 +4502,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break; break;
} }
case Yield::FINAL: { case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object()); VisitForAccumulatorValue(expr->generator_object());
__ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed)); __ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
__ Str(x1, FieldMemOperand(result_register(), __ Str(x1, FieldMemOperand(result_register(),
@ -4384,7 +4514,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
break; break;
} }
case Yield::DELEGATING: { case Yield::kDelegating: {
VisitForStackValue(expr->generator_object()); VisitForStackValue(expr->generator_object());
// Initial stack layout is as follows: // Initial stack layout is as follows:
@ -4393,8 +4523,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
Label l_catch, l_try, l_suspend, l_continuation, l_resume; Label l_catch, l_try, l_suspend, l_continuation, l_resume;
Label l_next, l_call, l_loop; Label l_next, l_call, l_loop;
Register load_receiver = LoadIC::ReceiverRegister(); Register load_receiver = LoadDescriptor::ReceiverRegister();
Register load_name = LoadIC::NameRegister(); Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined. // Initial send value is undefined.
__ LoadRoot(x0, Heap::kUndefinedValueRootIndex); __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
@ -4454,10 +4584,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Peek(load_receiver, 1 * kPointerSize); __ Peek(load_receiver, 1 * kPointerSize);
__ Peek(load_name, 2 * kPointerSize); __ Peek(load_name, 2 * kPointerSize);
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ Mov(LoadIC::SlotRegister(), __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->KeyedLoadFeedbackSlot())); Smi::FromInt(expr->KeyedLoadFeedbackSlot()));
} }
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None()); CallIC(ic, TypeFeedbackId::None());
__ Mov(x1, x0); __ Mov(x1, x0);
__ Poke(x1, 2 * kPointerSize); __ Poke(x1, 2 * kPointerSize);
@ -4474,7 +4604,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Push(load_receiver); // save result __ Push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done" __ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ Mov(LoadIC::SlotRegister(), __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->DoneFeedbackSlot())); Smi::FromInt(expr->DoneFeedbackSlot()));
} }
CallLoadIC(NOT_CONTEXTUAL); // x0=result.done CallLoadIC(NOT_CONTEXTUAL); // x0=result.done
@ -4487,7 +4617,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ Pop(load_receiver); // result __ Pop(load_receiver); // result
__ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value" __ LoadRoot(load_name, Heap::kvalue_stringRootIndex); // "value"
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
__ Mov(LoadIC::SlotRegister(), __ Mov(VectorLoadICDescriptor::SlotRegister(),
Smi::FromInt(expr->ValueFeedbackSlot())); Smi::FromInt(expr->ValueFeedbackSlot()));
} }
CallLoadIC(NOT_CONTEXTUAL); // x0=result.value CallLoadIC(NOT_CONTEXTUAL); // x0=result.value

View File

@ -182,8 +182,8 @@ LSDataSize CalcLSPairDataSize(LoadStorePairOp op) {
} }
ptrdiff_t Instruction::ImmPCOffset() { int64_t Instruction::ImmPCOffset() {
ptrdiff_t offset; int64_t offset;
if (IsPCRelAddressing()) { if (IsPCRelAddressing()) {
// PC-relative addressing. Only ADR is supported. // PC-relative addressing. Only ADR is supported.
offset = ImmPCRel(); offset = ImmPCRel();

View File

@ -338,7 +338,7 @@ class Instruction {
// Find the PC offset encoded in this instruction. 'this' may be a branch or // Find the PC offset encoded in this instruction. 'this' may be a branch or
// a PC-relative addressing instruction. // a PC-relative addressing instruction.
// The offset returned is unscaled. // The offset returned is unscaled.
ptrdiff_t ImmPCOffset(); int64_t ImmPCOffset();
// Find the target of this instruction. 'this' may be a branch or a // Find the target of this instruction. 'this' may be a branch or a
// PC-relative addressing instruction. // PC-relative addressing instruction.
@ -352,9 +352,9 @@ class Instruction {
// Patch a literal load instruction to load from 'source'. // Patch a literal load instruction to load from 'source'.
void SetImmLLiteral(Instruction* source); void SetImmLLiteral(Instruction* source);
uint8_t* LiteralAddress() { uintptr_t LiteralAddress() {
int offset = ImmLLiteral() << kLoadLiteralScaleLog2; int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
return reinterpret_cast<uint8_t*>(this) + offset; return reinterpret_cast<uintptr_t>(this) + offset;
} }
enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT }; enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };

View File

@ -107,7 +107,7 @@ Instrument::Instrument(const char* datafile, uint64_t sample_period)
} }
} }
static const int num_counters = ARRAY_SIZE(kCounterList); static const int num_counters = arraysize(kCounterList);
// Dump an instrumentation description comment at the top of the file. // Dump an instrumentation description comment at the top of the file.
fprintf(output_stream_, "# counters=%d\n", num_counters); fprintf(output_stream_, "# counters=%d\n", num_counters);

View File

@ -0,0 +1,368 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
const Register LoadDescriptor::ReceiverRegister() { return x1; }
const Register LoadDescriptor::NameRegister() { return x2; }
const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return x0; }
const Register VectorLoadICDescriptor::VectorRegister() { return x3; }
const Register StoreDescriptor::ReceiverRegister() { return x1; }
const Register StoreDescriptor::NameRegister() { return x2; }
const Register StoreDescriptor::ValueRegister() { return x0; }
const Register ElementTransitionAndStoreDescriptor::MapRegister() { return x3; }
const Register InstanceofDescriptor::left() {
// Object to check (instanceof lhs).
return x11;
}
const Register InstanceofDescriptor::right() {
// Constructor function (instanceof rhs).
return x10;
}
const Register ArgumentsAccessReadDescriptor::index() { return x1; }
const Register ArgumentsAccessReadDescriptor::parameter_count() { return x0; }
const Register ApiGetterDescriptor::function_address() { return x2; }
const Register MathPowTaggedDescriptor::exponent() { return x11; }
const Register MathPowIntegerDescriptor::exponent() { return x12; }
// FastNewClosure stub: receives the function info in x2.
void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x2};  // function info
  data->Initialize(arraysize(regs), regs, NULL);
}
// FastNewContext stub: receives the function in x1.
void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x1};  // function
  data->Initialize(arraysize(regs), regs, NULL);
}
// ToNumber stub: receives the value to convert in x0.
void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x0};  // value
  data->Initialize(arraysize(regs), regs, NULL);
}
// NumberToString stub: receives the value in x0.
void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x0};  // value
  data->Initialize(arraysize(regs), regs, NULL);
}
// FastCloneShallowArray stub: literals array (x3), literal index (x2, Smi)
// and constant elements (x1).
void FastCloneShallowArrayDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x3,   // array literals array
                     x2,   // array literal index
                     x1};  // constant elements
  Representation reps[] = {Representation::Tagged(),   // context
                           Representation::Tagged(),   // literals array
                           Representation::Smi(),      // literal index
                           Representation::Tagged()};  // constant elements
  data->Initialize(arraysize(regs), regs, reps);
}
// FastCloneShallowObject stub.
void FastCloneShallowObjectDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x3,   // object literals array
                     x2,   // object literal index
                     x1,   // constant properties
                     x0};  // object literal flags
  data->Initialize(arraysize(regs), regs, NULL);
}
// CreateAllocationSite stub.
void CreateAllocationSiteDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x2,   // feedback vector
                     x3};  // call feedback slot
  data->Initialize(arraysize(regs), regs, NULL);
}
void StoreArrayLiteralElementDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {cp, x3, x0};
  data->Initialize(arraysize(regs), regs, NULL);
}
// CallFunction stub: x1 holds the function to call.
void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x1};  // the function to call
  data->Initialize(arraysize(regs), regs, NULL);
}
// CallFunction with type feedback: additionally passes the feedback slot
// in x3 as a Smi.
void CallFunctionWithFeedbackDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x1,   // the function to call
                     x3};  // feedback slot (Smi)
  Representation reps[] = {Representation::Tagged(),  // context
                           Representation::Tagged(),  // function
                           Representation::Smi()};    // feedback slot
  data->Initialize(arraysize(regs), regs, reps);
}
void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  // x0 : number of arguments
  // x1 : the function to call
  // x2 : feedback vector
  // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
  //      ("r2" in the original comment was a leftover from the ARM port.)
  // TODO(turbofan): So far we don't gather type feedback and hence skip the
  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
  // Note: x3 is deliberately absent from the register list below.
  Register registers[] = {cp, x0, x1, x2};
  data->Initialize(arraysize(registers), registers, NULL);
}
// RegExpConstructResult stub.
void RegExpConstructResultDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x2,   // length
                     x1,   // index (of last match)
                     x0};  // string
  data->Initialize(arraysize(regs), regs, NULL);
}
// TransitionElementsKind stub.
void TransitionElementsKindDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x0,   // value (js_array)
                     x1};  // to_map
  data->Initialize(arraysize(regs), regs, NULL);
}
// Array constructor with a constant argument count: the count itself
// (passed in x0) is not part of the descriptor state.
void ArrayConstructorConstantArgCountDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x1,   // function
                     x2};  // allocation site with elements kind
  data->Initialize(arraysize(regs), regs, NULL);
}
void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  // stack param count needs (constructor pointer, and single argument)
  Register regs[] = {cp,   // context
                     x1,   // constructor
                     x2,   // allocation site
                     x0};  // argument count
  Representation reps[] = {Representation::Tagged(),      // context
                           Representation::Tagged(),      // constructor
                           Representation::Tagged(),      // allocation site
                           Representation::Integer32()};  // argument count
  data->Initialize(arraysize(regs), regs, reps);
}
// Internal array constructor with a constant argument count: the count
// itself (passed in x0) is not part of the descriptor state.
void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x1};  // constructor function
  data->Initialize(arraysize(regs), regs, NULL);
}
void InternalArrayConstructorDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  // stack param count needs (constructor pointer, and single argument)
  Register regs[] = {cp,   // context
                     x1,   // constructor
                     x0};  // argument count
  Representation reps[] = {Representation::Tagged(),      // context
                           Representation::Tagged(),      // constructor
                           Representation::Integer32()};  // argument count
  data->Initialize(arraysize(regs), regs, reps);
}
// CompareNil stub: value to compare in x0.
void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x0};  // value to compare
  data->Initialize(arraysize(regs), regs, NULL);
}
// ToBoolean stub: value in x0.
void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x0};  // value
  data->Initialize(arraysize(regs), regs, NULL);
}
// Binary operation stub: left operand in x1, right operand in x0.
void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x1,   // left operand
                     x0};  // right operand
  data->Initialize(arraysize(regs), regs, NULL);
}
// Binary operation stub with an allocation site (x2) in addition to the
// operands.
void BinaryOpWithAllocationSiteDescriptor::Initialize(
    CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x2,   // allocation site
                     x1,   // left operand
                     x0};  // right operand
  data->Initialize(arraysize(regs), regs, NULL);
}
// StringAdd stub: left operand in x1, right operand in x0.
void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  Register regs[] = {cp,   // context
                     x1,   // left operand
                     x0};  // right operand
  data->Initialize(arraysize(regs), regs, NULL);
}
void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  // Targets using this descriptor must not have their address inlined
  // into the call site (NEVER_INLINE_TARGET_ADDRESS).
  static PlatformInterfaceDescriptor no_inline =
      PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
  Register regs[] = {cp,   // context
                     x2};  // key
  Representation reps[] = {Representation::Tagged(),   // context
                           Representation::Tagged()};  // key
  data->Initialize(arraysize(regs), regs, reps, &no_inline);
}
void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  // Targets using this descriptor must not have their address inlined
  // into the call site (NEVER_INLINE_TARGET_ADDRESS).
  static PlatformInterfaceDescriptor no_inline =
      PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
  Register regs[] = {cp,   // context
                     x2};  // name
  Representation reps[] = {Representation::Tagged(),   // context
                           Representation::Tagged()};  // name
  data->Initialize(arraysize(regs), regs, reps, &no_inline);
}
void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  // Targets using this descriptor may have their address inlined into the
  // call site (CAN_INLINE_TARGET_ADDRESS).
  static PlatformInterfaceDescriptor inline_ok =
      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
  Register regs[] = {cp,   // context
                     x0};  // receiver
  Representation reps[] = {Representation::Tagged(),   // context
                           Representation::Tagged()};  // receiver
  data->Initialize(arraysize(regs), regs, reps, &inline_ok);
}
void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  // Targets using this descriptor may have their address inlined into the
  // call site (CAN_INLINE_TARGET_ADDRESS).
  static PlatformInterfaceDescriptor inline_ok =
      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
  Register regs[] = {cp,   // context
                     x1,   // JSFunction
                     x0,   // actual number of arguments
                     x2};  // expected number of arguments
  Representation reps[] = {
      Representation::Tagged(),     // context
      Representation::Tagged(),     // JSFunction
      Representation::Integer32(),  // actual number of arguments
      Representation::Integer32()   // expected number of arguments
  };
  data->Initialize(arraysize(regs), regs, reps, &inline_ok);
}
void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  // Targets using this descriptor may have their address inlined into the
  // call site (CAN_INLINE_TARGET_ADDRESS).
  static PlatformInterfaceDescriptor inline_ok =
      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
  Register regs[] = {cp,   // context
                     x0,   // callee
                     x4,   // call_data
                     x2,   // holder
                     x1};  // api_function_address
  Representation reps[] = {
      Representation::Tagged(),    // context
      Representation::Tagged(),    // callee
      Representation::Tagged(),    // call_data
      Representation::Tagged(),    // holder
      Representation::External()   // api_function_address
  };
  data->Initialize(arraysize(regs), regs, reps, &inline_ok);
}
}
} // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM64

View File

@ -0,0 +1,26 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
#define V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
#include "src/interface-descriptors.h"
namespace v8 {
namespace internal {
// Platform-specific payload for a call-interface descriptor on arm64:
// records how the call target address may be stored/inlined
// (see TargetAddressStorageMode).
class PlatformInterfaceDescriptor {
 public:
  explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
      : storage_mode_(storage_mode) {}

  // Const-qualified: reading the mode never mutates the descriptor, and the
  // descriptors are typically function-local statics shared by reference.
  TargetAddressStorageMode storage_mode() const { return storage_mode_; }

 private:
  TargetAddressStorageMode storage_mode_;
};
}
} // namespace v8::internal
#endif // V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_

View File

@ -354,12 +354,6 @@ const char* LArithmeticT::Mnemonic() const {
} }
void LChunkBuilder::Abort(BailoutReason reason) {
info()->set_bailout_reason(reason);
status_ = ABORTED;
}
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) { LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER, return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg)); Register::ToAllocationIndex(reg));
@ -1036,14 +1030,14 @@ LInstruction* LChunkBuilder::DoCallJSFunction(
LInstruction* LChunkBuilder::DoCallWithDescriptor( LInstruction* LChunkBuilder::DoCallWithDescriptor(
HCallWithDescriptor* instr) { HCallWithDescriptor* instr) {
const InterfaceDescriptor* descriptor = instr->descriptor(); CallInterfaceDescriptor descriptor = instr->descriptor();
LOperand* target = UseRegisterOrConstantAtStart(instr->target()); LOperand* target = UseRegisterOrConstantAtStart(instr->target());
ZoneList<LOperand*> ops(instr->OperandCount(), zone()); ZoneList<LOperand*> ops(instr->OperandCount(), zone());
ops.Add(target, zone()); ops.Add(target, zone());
for (int i = 1; i < instr->OperandCount(); i++) { for (int i = 1; i < instr->OperandCount(); i++) {
LOperand* op = UseFixed(instr->OperandAt(i), LOperand* op =
descriptor->GetParameterRegister(i - 1)); UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
ops.Add(op, zone()); ops.Add(op, zone());
} }
@ -1252,7 +1246,6 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
DCHECK(input_rep.IsSmiOrTagged()); DCHECK(input_rep.IsSmiOrTagged());
return AssignEnvironment( return AssignEnvironment(
DefineAsRegister(new(zone()) LClampTToUint8(reg, DefineAsRegister(new(zone()) LClampTToUint8(reg,
TempRegister(),
TempDoubleRegister()))); TempDoubleRegister())));
} }
} }
@ -1475,6 +1468,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
instr->arguments_object()->IsLinked()) { instr->arguments_object()->IsLinked()) {
inner->Bind(instr->arguments_var(), instr->arguments_object()); inner->Bind(instr->arguments_var(), instr->arguments_object());
} }
inner->BindContext(instr->closure_context());
inner->set_entry(instr); inner->set_entry(instr);
current_block_->UpdateEnvironment(inner); current_block_->UpdateEnvironment(inner);
chunk_->AddInlinedClosure(instr->closure()); chunk_->AddInlinedClosure(instr->closure());
@ -1561,6 +1555,19 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
} }
LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
HTailCallThroughMegamorphicCache* instr) {
LOperand* context = UseFixed(instr->context(), cp);
LOperand* receiver_register =
UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
LOperand* name_register =
UseFixed(instr->name(), LoadDescriptor::NameRegister());
// Not marked as call. It can't deoptimize, and it never returns.
return new (zone()) LTailCallThroughMegamorphicCache(
context, receiver_register, name_register);
}
LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) { LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
// The function is required (by MacroAssembler::InvokeFunction) to be in x1. // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
@ -1663,11 +1670,11 @@ LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) { LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* global_object = UseFixed(instr->global_object(), LOperand* global_object =
LoadIC::ReceiverRegister()); UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL; LOperand* vector = NULL;
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
vector = FixedTemp(LoadIC::VectorRegister()); vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
} }
LLoadGlobalGeneric* result = LLoadGlobalGeneric* result =
@ -1725,11 +1732,12 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); LOperand* object =
LOperand* key = UseFixed(instr->key(), LoadIC::NameRegister()); UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
LOperand* vector = NULL; LOperand* vector = NULL;
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
vector = FixedTemp(LoadIC::VectorRegister()); vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
} }
LInstruction* result = LInstruction* result =
@ -1747,10 +1755,11 @@ LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) { LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), LoadIC::ReceiverRegister()); LOperand* object =
UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
LOperand* vector = NULL; LOperand* vector = NULL;
if (FLAG_vector_ics) { if (FLAG_vector_ics) {
vector = FixedTemp(LoadIC::VectorRegister()); vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
} }
LInstruction* result = LInstruction* result =
@ -1934,12 +1943,12 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
int32_t constant_abs = Abs(constant); int32_t constant_abs = Abs(constant);
if (!end_range_constant && if (!end_range_constant &&
(small_constant || (small_constant || (base::bits::IsPowerOfTwo32(constant_abs)) ||
(IsPowerOf2(constant_abs)) || (!can_overflow && (base::bits::IsPowerOfTwo32(constant_abs + 1) ||
(!can_overflow && (IsPowerOf2(constant_abs + 1) || base::bits::IsPowerOfTwo32(constant_abs - 1))))) {
IsPowerOf2(constant_abs - 1))))) {
LConstantOperand* right = UseConstant(most_const); LConstantOperand* right = UseConstant(most_const);
bool need_register = IsPowerOf2(constant_abs) && !small_constant; bool need_register =
base::bits::IsPowerOfTwo32(constant_abs) && !small_constant;
LOperand* left = need_register ? UseRegister(least_const) LOperand* left = need_register ? UseRegister(least_const)
: UseRegisterAtStart(least_const); : UseRegisterAtStart(least_const);
LInstruction* result = LInstruction* result =
@ -1985,10 +1994,10 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
return DefineAsSpilled(result, spill_index); return DefineAsSpilled(result, spill_index);
} else { } else {
DCHECK(info()->IsStub()); DCHECK(info()->IsStub());
CodeStubInterfaceDescriptor* descriptor = CallInterfaceDescriptor descriptor =
info()->code_stub()->GetInterfaceDescriptor(); info()->code_stub()->GetCallInterfaceDescriptor();
int index = static_cast<int>(instr->index()); int index = static_cast<int>(instr->index());
Register reg = descriptor->GetEnvironmentParameterRegister(index); Register reg = descriptor.GetEnvironmentParameterRegister(index);
return DefineFixed(result, reg); return DefineFixed(result, reg);
} }
} }
@ -2001,11 +2010,14 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Representation exponent_type = instr->right()->representation(); Representation exponent_type = instr->right()->representation();
DCHECK(instr->left()->representation().IsDouble()); DCHECK(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), d0); LOperand* left = UseFixedDouble(instr->left(), d0);
LOperand* right = exponent_type.IsInteger32() LOperand* right;
? UseFixed(instr->right(), x12) if (exponent_type.IsInteger32()) {
: exponent_type.IsDouble() right = UseFixed(instr->right(), MathPowIntegerDescriptor::exponent());
? UseFixedDouble(instr->right(), d1) } else if (exponent_type.IsDouble()) {
: UseFixed(instr->right(), x11); right = UseFixedDouble(instr->right(), d1);
} else {
right = UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
}
LPower* result = new(zone()) LPower(left, right); LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d0), return MarkAsCall(DefineFixedDouble(result, d0),
instr, instr,
@ -2203,8 +2215,7 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
return DoArithmeticT(op, instr); return DoArithmeticT(op, instr);
} }
DCHECK(instr->representation().IsInteger32() || DCHECK(instr->representation().IsSmiOrInteger32());
instr->representation().IsSmi());
DCHECK(instr->left()->representation().Equals(instr->representation())); DCHECK(instr->left()->representation().Equals(instr->representation()));
DCHECK(instr->right()->representation().Equals(instr->representation())); DCHECK(instr->right()->representation().Equals(instr->representation()));
@ -2215,42 +2226,30 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
LOperand* left = instr->representation().IsSmi() LOperand* left = instr->representation().IsSmi()
? UseRegister(instr->left()) ? UseRegister(instr->left())
: UseRegisterAtStart(instr->left()); : UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterOrConstantAtStart(instr->right());
HValue* right_value = instr->right(); // The only shift that can deoptimize is `left >>> 0`, where left is negative.
LOperand* right = NULL; // In these cases, the result is a uint32 that is too large for an int32.
LOperand* temp = NULL; bool right_can_be_zero = !instr->right()->IsConstant() ||
int constant_value = 0; (JSShiftAmountFromHConstant(instr->right()) == 0);
if (right_value->IsConstant()) { bool can_deopt = false;
right = UseConstant(right_value); if ((op == Token::SHR) && right_can_be_zero) {
constant_value = JSShiftAmountFromHConstant(right_value);
} else {
right = UseRegisterAtStart(right_value);
if (op == Token::ROR) {
temp = TempRegister();
}
}
// Shift operations can only deoptimize if we do a logical shift by 0 and the
// result cannot be truncated to int32.
bool does_deopt = false;
if ((op == Token::SHR) && (constant_value == 0)) {
if (FLAG_opt_safe_uint32_operations) { if (FLAG_opt_safe_uint32_operations) {
does_deopt = !instr->CheckFlag(HInstruction::kUint32); can_deopt = !instr->CheckFlag(HInstruction::kUint32);
} else { } else {
does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32); can_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
} }
} }
LInstruction* result; LInstruction* result;
if (instr->representation().IsInteger32()) { if (instr->representation().IsInteger32()) {
result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt)); result = DefineAsRegister(new (zone()) LShiftI(op, left, right, can_deopt));
} else { } else {
DCHECK(instr->representation().IsSmi()); DCHECK(instr->representation().IsSmi());
result = DefineAsRegister( result = DefineAsRegister(new (zone()) LShiftS(op, left, right, can_deopt));
new(zone()) LShiftS(op, left, right, temp, does_deopt));
} }
return does_deopt ? AssignEnvironment(result) : result; return can_deopt ? AssignEnvironment(result) : result;
} }
@ -2379,10 +2378,10 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), LOperand* object =
KeyedStoreIC::ReceiverRegister()); UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* key = UseFixed(instr->key(), KeyedStoreIC::NameRegister()); LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
LOperand* value = UseFixed(instr->value(), KeyedStoreIC::ValueRegister()); LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
DCHECK(instr->object()->representation().IsTagged()); DCHECK(instr->object()->representation().IsTagged());
DCHECK(instr->key()->representation().IsTagged()); DCHECK(instr->key()->representation().IsTagged());
@ -2424,8 +2423,9 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) { LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), cp); LOperand* context = UseFixed(instr->context(), cp);
LOperand* object = UseFixed(instr->object(), StoreIC::ReceiverRegister()); LOperand* object =
LOperand* value = UseFixed(instr->value(), StoreIC::ValueRegister()); UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value); LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
return MarkAsCall(result, instr); return MarkAsCall(result, instr);
@ -2682,7 +2682,7 @@ LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
} else { } else {
spill_index = env_index - instr->environment()->first_local_index(); spill_index = env_index - instr->environment()->first_local_index();
if (spill_index > LUnallocated::kMaxFixedSlotIndex) { if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
Abort(kTooManySpillSlotsNeededForOSR); Retry(kTooManySpillSlotsNeededForOSR);
spill_index = 0; spill_index = 0;
} }
} }

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -27,7 +27,7 @@ class LCodeGen: public LCodeGenBase {
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info) LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
: LCodeGenBase(chunk, assembler, info), : LCodeGenBase(chunk, assembler, info),
deoptimizations_(4, info->zone()), deoptimizations_(4, info->zone()),
deopt_jump_table_(4, info->zone()), jump_table_(4, info->zone()),
deoptimization_literals_(8, info->zone()), deoptimization_literals_(8, info->zone()),
inlined_function_count_(0), inlined_function_count_(0),
scope_(info->scope()), scope_(info->scope()),
@ -83,31 +83,17 @@ class LCodeGen: public LCodeGenBase {
enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 }; enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
// Support for converting LOperands to assembler types. // Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const; Register ToRegister(LOperand* op) const;
Register ToRegister32(LOperand* op) const; Register ToRegister32(LOperand* op) const;
Operand ToOperand(LOperand* op); Operand ToOperand(LOperand* op);
Operand ToOperand32I(LOperand* op); Operand ToOperand32(LOperand* op);
Operand ToOperand32U(LOperand* op);
enum StackMode { kMustUseFramePointer, kCanUseStackPointer }; enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
MemOperand ToMemOperand(LOperand* op, MemOperand ToMemOperand(LOperand* op,
StackMode stack_mode = kCanUseStackPointer) const; StackMode stack_mode = kCanUseStackPointer) const;
Handle<Object> ToHandle(LConstantOperand* op) const; Handle<Object> ToHandle(LConstantOperand* op) const;
template <class LI> template <class LI>
Operand ToShiftedRightOperand32I(LOperand* right, Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info);
LI* shift_info) {
return ToShiftedRightOperand32(right, shift_info, SIGNED_INT32);
}
template<class LI>
Operand ToShiftedRightOperand32U(LOperand* right,
LI* shift_info) {
return ToShiftedRightOperand32(right, shift_info, UNSIGNED_INT32);
}
template<class LI>
Operand ToShiftedRightOperand32(LOperand* right,
LI* shift_info,
IntegerSignedness signedness);
int JSShiftAmountFromLConstant(LOperand* constant) { int JSShiftAmountFromLConstant(LOperand* constant) {
return ToInteger32(LConstantOperand::cast(constant)) & 0x1f; return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
@ -158,8 +144,6 @@ class LCodeGen: public LCodeGenBase {
Register object, Register object,
Register index); Register index);
Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
static Condition TokenToCondition(Token::Value op, bool is_unsigned); static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block); void EmitGoto(int block);
void DoGap(LGap* instr); void DoGap(LGap* instr);
@ -212,6 +196,9 @@ class LCodeGen: public LCodeGenBase {
int* offset, int* offset,
AllocationSiteMode mode); AllocationSiteMode mode);
template <class T>
void EmitVectorLoadICRegisters(T* instr);
// Emits optimized code for %_IsString(x). Preserves input register. // Emits optimized code for %_IsString(x). Preserves input register.
// Returns the condition on which a final split to // Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough. // true and false label should be made, to optimize fallthrough.
@ -226,27 +213,31 @@ class LCodeGen: public LCodeGenBase {
Register temp, Register temp,
LOperand* index, LOperand* index,
String::Encoding encoding); String::Encoding encoding);
void DeoptimizeBranch( void DeoptimizeBranch(LInstruction* instr, const char* detail,
LEnvironment* environment, BranchType branch_type, Register reg = NoReg,
BranchType branch_type, Register reg = NoReg, int bit = -1, int bit = -1,
Deoptimizer::BailoutType* override_bailout_type = NULL); Deoptimizer::BailoutType* override_bailout_type = NULL);
void Deoptimize(LEnvironment* environment, void Deoptimize(LInstruction* instr, const char* detail,
Deoptimizer::BailoutType* override_bailout_type = NULL); Deoptimizer::BailoutType* override_bailout_type = NULL);
void DeoptimizeIf(Condition cond, LEnvironment* environment); void DeoptimizeIf(Condition cond, LInstruction* instr, const char* detail);
void DeoptimizeIfZero(Register rt, LEnvironment* environment); void DeoptimizeIfZero(Register rt, LInstruction* instr, const char* detail);
void DeoptimizeIfNotZero(Register rt, LEnvironment* environment); void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
void DeoptimizeIfNegative(Register rt, LEnvironment* environment); const char* detail);
void DeoptimizeIfSmi(Register rt, LEnvironment* environment); void DeoptimizeIfNegative(Register rt, LInstruction* instr,
void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment); const char* detail);
void DeoptimizeIfRoot(Register rt, void DeoptimizeIfSmi(Register rt, LInstruction* instr, const char* detail);
Heap::RootListIndex index, void DeoptimizeIfNotSmi(Register rt, LInstruction* instr, const char* detail);
LEnvironment* environment); void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
void DeoptimizeIfNotRoot(Register rt, LInstruction* instr, const char* detail);
Heap::RootListIndex index, void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
LEnvironment* environment); LInstruction* instr, const char* detail);
void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment); void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment); void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment); const char* detail);
void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
const char* detail);
void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
const char* detail);
MemOperand PrepareKeyedExternalArrayOperand(Register key, MemOperand PrepareKeyedExternalArrayOperand(Register key,
Register base, Register base,
@ -286,10 +277,10 @@ class LCodeGen: public LCodeGenBase {
void RestoreCallerDoubles(); void RestoreCallerDoubles();
// Code generation steps. Returns true if code generation should continue. // Code generation steps. Returns true if code generation should continue.
void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE; void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
bool GeneratePrologue(); bool GeneratePrologue();
bool GenerateDeferredCode(); bool GenerateDeferredCode();
bool GenerateDeoptJumpTable(); bool GenerateJumpTable();
bool GenerateSafepointTable(); bool GenerateSafepointTable();
// Generates the custom OSR entrypoint and sets the osr_pc_offset. // Generates the custom OSR entrypoint and sets the osr_pc_offset.
@ -338,7 +329,7 @@ class LCodeGen: public LCodeGenBase {
Register function_reg = NoReg); Register function_reg = NoReg);
// Support for recording safepoint and position information. // Support for recording safepoint and position information.
void RecordAndWritePosition(int position) V8_OVERRIDE; void RecordAndWritePosition(int position) OVERRIDE;
void RecordSafepoint(LPointerMap* pointers, void RecordSafepoint(LPointerMap* pointers,
Safepoint::Kind kind, Safepoint::Kind kind,
int arguments, int arguments,
@ -351,10 +342,10 @@ class LCodeGen: public LCodeGenBase {
void RecordSafepointWithLazyDeopt(LInstruction* instr, void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode); SafepointMode safepoint_mode);
void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE; void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
ZoneList<LEnvironment*> deoptimizations_; ZoneList<LEnvironment*> deoptimizations_;
ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_; ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_; ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_; int inlined_function_count_;
Scope* const scope_; Scope* const scope_;

View File

@ -13,6 +13,7 @@
#include "src/arm64/assembler-arm64.h" #include "src/arm64/assembler-arm64.h"
#include "src/arm64/instrument-arm64.h" #include "src/arm64/instrument-arm64.h"
#include "src/arm64/macro-assembler-arm64.h" #include "src/arm64/macro-assembler-arm64.h"
#include "src/base/bits.h"
namespace v8 { namespace v8 {
@ -1520,7 +1521,7 @@ void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
void MacroAssembler::Claim(const Register& count, uint64_t unit_size) { void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return; if (unit_size == 0) return;
DCHECK(IsPowerOf2(unit_size)); DCHECK(base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift); const Operand size(count, LSL, shift);
@ -1538,7 +1539,7 @@ void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) { void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
DCHECK(unit_size == 0 || IsPowerOf2(unit_size)); DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift; const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi, const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR), (shift >= 0) ? (LSL) : (LSR),
@ -1578,7 +1579,7 @@ void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
void MacroAssembler::Drop(const Register& count, uint64_t unit_size) { void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
if (unit_size == 0) return; if (unit_size == 0) return;
DCHECK(IsPowerOf2(unit_size)); DCHECK(base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits); const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
const Operand size(count, LSL, shift); const Operand size(count, LSL, shift);
@ -1599,7 +1600,7 @@ void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) { void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
DCHECK(unit_size == 0 || IsPowerOf2(unit_size)); DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift; const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
const Operand size(count_smi, const Operand size(count_smi,
(shift >= 0) ? (LSL) : (LSR), (shift >= 0) ? (LSL) : (LSR),

View File

@ -6,12 +6,14 @@
#if V8_TARGET_ARCH_ARM64 #if V8_TARGET_ARCH_ARM64
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/bootstrapper.h" #include "src/bootstrapper.h"
#include "src/codegen.h" #include "src/codegen.h"
#include "src/cpu-profiler.h" #include "src/cpu-profiler.h"
#include "src/debug.h" #include "src/debug.h"
#include "src/isolate-inl.h" #include "src/isolate-inl.h"
#include "src/runtime.h" #include "src/runtime/runtime.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -1656,12 +1658,6 @@ void MacroAssembler::ThrowUncatchable(Register value,
} }
void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
DCHECK(smi.Is64Bits());
Abs(smi, smi, slow);
}
void MacroAssembler::AssertSmi(Register object, BailoutReason reason) { void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) { if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTag == 0);
@ -2059,7 +2055,7 @@ void MacroAssembler::CallCFunction(Register function,
int sp_alignment = ActivationFrameAlignment(); int sp_alignment = ActivationFrameAlignment();
// The ABI mandates at least 16-byte alignment. // The ABI mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16); DCHECK(sp_alignment >= 16);
DCHECK(IsPowerOf2(sp_alignment)); DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
// The current stack pointer is a callee saved register, and is preserved // The current stack pointer is a callee saved register, and is preserved
// across the call. // across the call.
@ -2251,58 +2247,38 @@ int MacroAssembler::CallSize(Handle<Code> code,
} }
void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
SmiCheckType smi_check_type) {
Label on_not_heap_number;
if (smi_check_type == DO_SMI_CHECK) {
JumpIfSmi(object, &on_not_heap_number);
}
void MacroAssembler::JumpForHeapNumber(Register object,
Register heap_number_map,
Label* on_heap_number,
Label* on_not_heap_number) {
DCHECK(on_heap_number || on_not_heap_number);
AssertNotSmi(object); AssertNotSmi(object);
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX(); Register temp = temps.AcquireX();
// Load the HeapNumber map if it is not passed.
if (heap_number_map.Is(NoReg)) {
heap_number_map = temps.AcquireX();
LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
} else {
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
DCHECK(!AreAliased(temp, heap_number_map));
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
Cmp(temp, heap_number_map); JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
if (on_heap_number) { Bind(&on_not_heap_number);
B(eq, on_heap_number);
}
if (on_not_heap_number) {
B(ne, on_not_heap_number);
}
}
void MacroAssembler::JumpIfHeapNumber(Register object,
Label* on_heap_number,
Register heap_number_map) {
JumpForHeapNumber(object,
heap_number_map,
on_heap_number,
NULL);
} }
void MacroAssembler::JumpIfNotHeapNumber(Register object, void MacroAssembler::JumpIfNotHeapNumber(Register object,
Label* on_not_heap_number, Label* on_not_heap_number,
Register heap_number_map) { SmiCheckType smi_check_type) {
JumpForHeapNumber(object, if (smi_check_type == DO_SMI_CHECK) {
heap_number_map, JumpIfSmi(object, on_not_heap_number);
NULL, }
on_not_heap_number);
AssertNotSmi(object);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
} }
@ -2336,8 +2312,7 @@ void MacroAssembler::LookupNumberStringCache(Register object,
Label load_result_from_cache; Label load_result_from_cache;
JumpIfSmi(object, &is_smi); JumpIfSmi(object, &is_smi);
CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found, JumpIfNotHeapNumber(object, not_found);
DONT_DO_SMI_CHECK);
STATIC_ASSERT(kDoubleSize == (kWRegSize * 2)); STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag); Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
@ -2699,14 +2674,9 @@ void MacroAssembler::FillFields(Register dst,
} }
void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings( void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
Register first, Register first, Register second, Register scratch1, Register scratch2,
Register second, Label* failure, SmiCheckType smi_check) {
Register scratch1,
Register scratch2,
Label* failure,
SmiCheckType smi_check) {
if (smi_check == DO_SMI_CHECK) { if (smi_check == DO_SMI_CHECK) {
JumpIfEitherSmi(first, second, failure); JumpIfEitherSmi(first, second, failure);
} else if (emit_debug_code()) { } else if (emit_debug_code()) {
@ -2721,72 +2691,63 @@ void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
Bind(&not_smi); Bind(&not_smi);
} }
// Test that both first and second are sequential ASCII strings. // Test that both first and second are sequential one-byte strings.
Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1, JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
scratch2, scratch2, failure);
scratch1,
scratch2,
failure);
} }
void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii( void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
Register first, Register first, Register second, Register scratch1, Register scratch2,
Register second,
Register scratch1,
Register scratch2,
Label* failure) { Label* failure) {
DCHECK(!AreAliased(scratch1, second)); DCHECK(!AreAliased(scratch1, second));
DCHECK(!AreAliased(scratch1, scratch2)); DCHECK(!AreAliased(scratch1, scratch2));
static const int kFlatAsciiStringMask = static const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
static const int kFlatAsciiStringTag = ASCII_STRING_TYPE; static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
And(scratch1, first, kFlatAsciiStringMask); And(scratch1, first, kFlatOneByteStringMask);
And(scratch2, second, kFlatAsciiStringMask); And(scratch2, second, kFlatOneByteStringMask);
Cmp(scratch1, kFlatAsciiStringTag); Cmp(scratch1, kFlatOneByteStringTag);
Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq); Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
B(ne, failure); B(ne, failure);
} }
void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
Register scratch, Register scratch,
Label* failure) { Label* failure) {
const int kFlatAsciiStringMask = const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatAsciiStringTag = const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag; kStringTag | kOneByteStringTag | kSeqStringTag;
And(scratch, type, kFlatAsciiStringMask); And(scratch, type, kFlatOneByteStringMask);
Cmp(scratch, kFlatAsciiStringTag); Cmp(scratch, kFlatOneByteStringTag);
B(ne, failure); B(ne, failure);
} }
void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first, Register first, Register second, Register scratch1, Register scratch2,
Register second,
Register scratch1,
Register scratch2,
Label* failure) { Label* failure) {
DCHECK(!AreAliased(first, second, scratch1, scratch2)); DCHECK(!AreAliased(first, second, scratch1, scratch2));
const int kFlatAsciiStringMask = const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatAsciiStringTag = const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag; kStringTag | kOneByteStringTag | kSeqStringTag;
And(scratch1, first, kFlatAsciiStringMask); And(scratch1, first, kFlatOneByteStringMask);
And(scratch2, second, kFlatAsciiStringMask); And(scratch2, second, kFlatOneByteStringMask);
Cmp(scratch1, kFlatAsciiStringTag); Cmp(scratch1, kFlatOneByteStringTag);
Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq); Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
B(ne, failure); B(ne, failure);
} }
void MacroAssembler::JumpIfNotUniqueName(Register type, void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
Label* not_unique_name) { Label* not_unique_name) {
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
// if ((type is string && type is internalized) || type == SYMBOL_TYPE) { // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
@ -3013,12 +2974,22 @@ void MacroAssembler::TryConvertDoubleToInt64(Register result,
void MacroAssembler::TruncateDoubleToI(Register result, void MacroAssembler::TruncateDoubleToI(Register result,
DoubleRegister double_input) { DoubleRegister double_input) {
Label done; Label done;
DCHECK(jssp.Is(StackPointer()));
// Try to convert the double to an int64. If successful, the bottom 32 bits // Try to convert the double to an int64. If successful, the bottom 32 bits
// contain our truncated int32 result. // contain our truncated int32 result.
TryConvertDoubleToInt64(result, double_input, &done); TryConvertDoubleToInt64(result, double_input, &done);
const Register old_stack_pointer = StackPointer();
if (csp.Is(old_stack_pointer)) {
// This currently only happens during compiler-unittest. If it arises
// during regular code generation the DoubleToI stub should be updated to
// cope with csp and have an extra parameter indicating which stack pointer
// it should use.
Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
Mov(jssp, csp);
SetStackPointer(jssp);
}
// If we fell through then inline version didn't succeed - call stub instead. // If we fell through then inline version didn't succeed - call stub instead.
Push(lr, double_input); Push(lr, double_input);
@ -3030,8 +3001,15 @@ void MacroAssembler::TruncateDoubleToI(Register result,
true); // skip_fastpath true); // skip_fastpath
CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
Drop(1, kDoubleSize); // Drop the double input on the stack. DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
Pop(lr); Pop(xzr, lr); // xzr to drop the double input on the stack.
if (csp.Is(old_stack_pointer)) {
Mov(csp, jssp);
SetStackPointer(csp);
AssertStackConsistency();
Pop(xzr, jssp);
}
Bind(&done); Bind(&done);
} }
@ -3556,10 +3534,8 @@ void MacroAssembler::AllocateTwoByteString(Register result,
} }
void MacroAssembler::AllocateAsciiString(Register result, void MacroAssembler::AllocateOneByteString(Register result, Register length,
Register length, Register scratch1, Register scratch2,
Register scratch1,
Register scratch2,
Register scratch3, Register scratch3,
Label* gc_required) { Label* gc_required) {
DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3)); DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
@ -3570,7 +3546,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
Bic(scratch1, scratch1, kObjectAlignmentMask); Bic(scratch1, scratch1, kObjectAlignmentMask);
// Allocate ASCII string in new space. // Allocate one-byte string in new space.
Allocate(scratch1, Allocate(scratch1,
result, result,
scratch2, scratch2,
@ -3579,11 +3555,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
TAG_OBJECT); TAG_OBJECT);
// Set the map, length and hash field. // Set the map, length and hash field.
InitializeNewString(result, InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
length, scratch1, scratch2);
Heap::kAsciiStringMapRootIndex,
scratch1,
scratch2);
} }
@ -3603,8 +3576,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
} }
void MacroAssembler::AllocateAsciiConsString(Register result, void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
Register length,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Label* gc_required) { Label* gc_required) {
@ -3615,11 +3587,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
gc_required, gc_required,
TAG_OBJECT); TAG_OBJECT);
InitializeNewString(result, InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
length, scratch1, scratch2);
Heap::kConsAsciiStringMapRootIndex,
scratch1,
scratch2);
} }
@ -3640,7 +3609,7 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result,
} }
void MacroAssembler::AllocateAsciiSlicedString(Register result, void MacroAssembler::AllocateOneByteSlicedString(Register result,
Register length, Register length,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
@ -3649,11 +3618,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result,
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT); TAG_OBJECT);
InitializeNewString(result, InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
length, scratch1, scratch2);
Heap::kSlicedAsciiStringMapRootIndex,
scratch1,
scratch2);
} }
@ -3754,8 +3720,15 @@ void MacroAssembler::CompareInstanceType(Register map,
} }
void MacroAssembler::CompareMap(Register obj, void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
Register scratch, UseScratchRegisterScope temps(this);
Register obj_map = temps.AcquireX();
Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
CompareRoot(obj_map, index);
}
void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
Handle<Map> map) { Handle<Map> map) {
Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
CompareMap(scratch, map); CompareMap(scratch, map);
@ -3777,7 +3750,7 @@ void MacroAssembler::CheckMap(Register obj,
JumpIfSmi(obj, fail); JumpIfSmi(obj, fail);
} }
CompareMap(obj, scratch, map); CompareObjectMap(obj, scratch, map);
B(ne, fail); B(ne, fail);
} }
@ -4017,8 +3990,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
JumpIfSmi(value_reg, &store_num); JumpIfSmi(value_reg, &store_num);
// Ensure that the object is a heap number. // Ensure that the object is a heap number.
CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), JumpIfNotHeapNumber(value_reg, fail);
fail, DONT_DO_SMI_CHECK);
Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
@ -4284,8 +4256,7 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
Bind(&store_buffer_overflow); Bind(&store_buffer_overflow);
Push(lr); Push(lr);
StoreBufferOverflowStub store_buffer_overflow_stub = StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
StoreBufferOverflowStub(isolate(), fp_mode);
CallStub(&store_buffer_overflow_stub); CallStub(&store_buffer_overflow_stub);
Pop(lr); Pop(lr);
@ -4424,8 +4395,8 @@ void MacroAssembler::RecordWriteField(
// Clobber clobbered input registers when running with the debug-code flag // Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors. // turned on to provoke errors.
if (emit_debug_code()) { if (emit_debug_code()) {
Mov(value, Operand(BitCast<int64_t>(kZapValue + 4))); Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8))); Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
} }
} }
@ -4444,7 +4415,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
UseScratchRegisterScope temps(this); UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX(); Register temp = temps.AcquireX();
CompareMap(map, temp, isolate()->factory()->meta_map()); CompareObjectMap(map, temp, isolate()->factory()->meta_map());
Check(eq, kWrongAddressOrValuePassedToRecordWrite); Check(eq, kWrongAddressOrValuePassedToRecordWrite);
} }
@ -4496,8 +4467,8 @@ void MacroAssembler::RecordWriteForMap(Register object,
// Clobber clobbered registers when running with the debug-code flag // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors. // turned on to provoke errors.
if (emit_debug_code()) { if (emit_debug_code()) {
Mov(dst, Operand(BitCast<int64_t>(kZapValue + 12))); Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
Mov(map, Operand(BitCast<int64_t>(kZapValue + 16))); Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
} }
} }
@ -4569,8 +4540,8 @@ void MacroAssembler::RecordWrite(
// Clobber clobbered registers when running with the debug-code flag // Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors. // turned on to provoke errors.
if (emit_debug_code()) { if (emit_debug_code()) {
Mov(address, Operand(BitCast<int64_t>(kZapValue + 12))); Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
Mov(value, Operand(BitCast<int64_t>(kZapValue + 16))); Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
} }
} }
@ -4775,8 +4746,8 @@ void MacroAssembler::EnsureNotWhite(
Mov(length_scratch, ExternalString::kSize); Mov(length_scratch, ExternalString::kSize);
TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object); TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
// Sequential string, either ASCII or UC16. // Sequential string, either Latin1 or UC16.
// For ASCII (char-size of 1) we shift the smi tag away to get the length. // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2. // getting the length multiplied by 2.
DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4); DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
@ -5315,13 +5286,15 @@ void MacroAssembler::TruncatingDiv(Register result,
int32_t divisor) { int32_t divisor) {
DCHECK(!AreAliased(result, dividend)); DCHECK(!AreAliased(result, dividend));
DCHECK(result.Is32Bits() && dividend.Is32Bits()); DCHECK(result.Is32Bits() && dividend.Is32Bits());
MultiplierAndShift ms(divisor); base::MagicNumbersForDivision<uint32_t> mag =
Mov(result, ms.multiplier()); base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
Mov(result, mag.multiplier);
Smull(result.X(), dividend, result); Smull(result.X(), dividend, result);
Asr(result.X(), result.X(), 32); Asr(result.X(), result.X(), 32);
if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend); bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend); if (divisor > 0 && neg) Add(result, result, dividend);
if (ms.shift() > 0) Asr(result, result, ms.shift()); if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
if (mag.shift > 0) Asr(result, result, mag.shift);
Add(result, result, Operand(dividend, LSR, 31)); Add(result, result, Operand(dividend, LSR, 31));
} }

View File

@ -7,9 +7,11 @@
#include <vector> #include <vector>
#include "src/bailout-reason.h"
#include "src/globals.h" #include "src/globals.h"
#include "src/arm64/assembler-arm64-inl.h" #include "src/arm64/assembler-arm64-inl.h"
#include "src/base/bits.h"
// Simulator specific helpers. // Simulator specific helpers.
#if USE_SIMULATOR #if USE_SIMULATOR
@ -808,7 +810,7 @@ class MacroAssembler : public Assembler {
int sp_alignment = ActivationFrameAlignment(); int sp_alignment = ActivationFrameAlignment();
// AAPCS64 mandates at least 16-byte alignment. // AAPCS64 mandates at least 16-byte alignment.
DCHECK(sp_alignment >= 16); DCHECK(sp_alignment >= 16);
DCHECK(IsPowerOf2(sp_alignment)); DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
Bic(csp, StackPointer(), sp_alignment - 1); Bic(csp, StackPointer(), sp_alignment - 1);
SetStackPointer(csp); SetStackPointer(csp);
} }
@ -909,11 +911,6 @@ class MacroAssembler : public Assembler {
inline void SmiTagAndPush(Register src); inline void SmiTagAndPush(Register src);
inline void SmiTagAndPush(Register src1, Register src2); inline void SmiTagAndPush(Register src1, Register src2);
// Compute the absolute value of 'smi' and leave the result in 'smi'
// register. If 'smi' is the most negative SMI, the absolute value cannot
// be represented as a SMI and a jump to 'slow' is done.
void SmiAbs(const Register& smi, Label* slow);
inline void JumpIfSmi(Register value, inline void JumpIfSmi(Register value,
Label* smi_label, Label* smi_label,
Label* not_smi_label = NULL); Label* not_smi_label = NULL);
@ -950,16 +947,10 @@ class MacroAssembler : public Assembler {
// Abort execution if argument is not a string, enabled via --debug-code. // Abort execution if argument is not a string, enabled via --debug-code.
void AssertString(Register object); void AssertString(Register object);
void JumpForHeapNumber(Register object, void JumpIfHeapNumber(Register object, Label* on_heap_number,
Register heap_number_map, SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
Label* on_heap_number, void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
Label* on_not_heap_number = NULL); SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
void JumpIfHeapNumber(Register object,
Label* on_heap_number,
Register heap_number_map = NoReg);
void JumpIfNotHeapNumber(Register object,
Label* on_not_heap_number,
Register heap_number_map = NoReg);
// Sets the vs flag if the input is -0.0. // Sets the vs flag if the input is -0.0.
void TestForMinusZero(DoubleRegister input); void TestForMinusZero(DoubleRegister input);
@ -1055,41 +1046,30 @@ class MacroAssembler : public Assembler {
// ---- String Utilities ---- // ---- String Utilities ----
// Jump to label if either object is not a sequential ASCII string. // Jump to label if either object is not a sequential one-byte string.
// Optionally perform a smi check on the objects first. // Optionally perform a smi check on the objects first.
void JumpIfEitherIsNotSequentialAsciiStrings( void JumpIfEitherIsNotSequentialOneByteStrings(
Register first, Register first, Register second, Register scratch1, Register scratch2,
Register second, Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);
Register scratch1,
Register scratch2,
Label* failure,
SmiCheckType smi_check = DO_SMI_CHECK);
// Check if instance type is sequential ASCII string and jump to label if // Check if instance type is sequential one-byte string and jump to label if
// it is not. // it is not.
void JumpIfInstanceTypeIsNotSequentialAscii(Register type, void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
Register scratch,
Label* failure); Label* failure);
// Checks if both instance types are sequential ASCII strings and jumps to // Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not. // label if either is not.
void JumpIfEitherInstanceTypeIsNotSequentialAscii( void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
Register first_object_instance_type, Register first_object_instance_type, Register second_object_instance_type,
Register second_object_instance_type, Register scratch1, Register scratch2, Label* failure);
Register scratch1,
Register scratch2,
Label* failure);
// Checks if both instance types are sequential ASCII strings and jumps to // Checks if both instance types are sequential one-byte strings and jumps to
// label if either is not. // label if either is not.
void JumpIfBothInstanceTypesAreNotSequentialAscii( void JumpIfBothInstanceTypesAreNotSequentialOneByte(
Register first_object_instance_type, Register first_object_instance_type, Register second_object_instance_type,
Register second_object_instance_type, Register scratch1, Register scratch2, Label* failure);
Register scratch1,
Register scratch2,
Label* failure);
void JumpIfNotUniqueName(Register type, Label* not_unique_name); void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
// ---- Calling / Jumping helpers ---- // ---- Calling / Jumping helpers ----
@ -1369,31 +1349,24 @@ class MacroAssembler : public Assembler {
Register scratch2, Register scratch2,
Register scratch3, Register scratch3,
Label* gc_required); Label* gc_required);
void AllocateAsciiString(Register result, void AllocateOneByteString(Register result, Register length,
Register length, Register scratch1, Register scratch2,
Register scratch1, Register scratch3, Label* gc_required);
Register scratch2,
Register scratch3,
Label* gc_required);
void AllocateTwoByteConsString(Register result, void AllocateTwoByteConsString(Register result,
Register length, Register length,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Label* gc_required); Label* gc_required);
void AllocateAsciiConsString(Register result, void AllocateOneByteConsString(Register result, Register length,
Register length, Register scratch1, Register scratch2,
Register scratch1,
Register scratch2,
Label* gc_required); Label* gc_required);
void AllocateTwoByteSlicedString(Register result, void AllocateTwoByteSlicedString(Register result,
Register length, Register length,
Register scratch1, Register scratch1,
Register scratch2, Register scratch2,
Label* gc_required); Label* gc_required);
void AllocateAsciiSlicedString(Register result, void AllocateOneByteSlicedString(Register result, Register length,
Register length, Register scratch1, Register scratch2,
Register scratch1,
Register scratch2,
Label* gc_required); Label* gc_required);
// Allocates a heap number or jumps to the gc_required label if the young // Allocates a heap number or jumps to the gc_required label if the young
@ -1470,9 +1443,11 @@ class MacroAssembler : public Assembler {
// Compare an object's map with the specified map. Condition flags are set // Compare an object's map with the specified map. Condition flags are set
// with result of map compare. // with result of map compare.
void CompareMap(Register obj, void CompareObjectMap(Register obj, Heap::RootListIndex index);
Register scratch,
Handle<Map> map); // Compare an object's map with the specified map. Condition flags are set
// with result of map compare.
void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);
// As above, but the map of the object is already loaded into the register // As above, but the map of the object is already loaded into the register
// which is preserved by the code generated. // which is preserved by the code generated.

View File

@ -260,7 +260,7 @@ void RegExpMacroAssemblerARM64::CheckCharacters(Vector<const uc16> str,
} }
for (int i = 0; i < str.length(); i++) { for (int i = 0; i < str.length(); i++) {
if (mode_ == ASCII) { if (mode_ == LATIN1) {
__ Ldrb(w10, MemOperand(characters_address, 1, PostIndex)); __ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
DCHECK(str[i] <= String::kMaxOneByteCharCode); DCHECK(str[i] <= String::kMaxOneByteCharCode);
} else { } else {
@ -307,7 +307,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReferenceIgnoreCase(
__ Cmn(capture_length, current_input_offset()); __ Cmn(capture_length, current_input_offset());
BranchOrBacktrack(gt, on_no_match); BranchOrBacktrack(gt, on_no_match);
if (mode_ == ASCII) { if (mode_ == LATIN1) {
Label success; Label success;
Label fail; Label fail;
Label loop_check; Label loop_check;
@ -447,7 +447,7 @@ void RegExpMacroAssemblerARM64::CheckNotBackReference(
Label loop; Label loop;
__ Bind(&loop); __ Bind(&loop);
if (mode_ == ASCII) { if (mode_ == LATIN1) {
__ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex)); __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
__ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex)); __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
} else { } else {
@ -530,7 +530,7 @@ void RegExpMacroAssemblerARM64::CheckBitInTable(
Handle<ByteArray> table, Handle<ByteArray> table,
Label* on_bit_set) { Label* on_bit_set) {
__ Mov(x11, Operand(table)); __ Mov(x11, Operand(table));
if ((mode_ != ASCII) || (kTableMask != String::kMaxOneByteCharCode)) { if ((mode_ != LATIN1) || (kTableMask != String::kMaxOneByteCharCode)) {
__ And(w10, current_character(), kTableMask); __ And(w10, current_character(), kTableMask);
__ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag); __ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
} else { } else {
@ -548,7 +548,7 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
switch (type) { switch (type) {
case 's': case 's':
// Match space-characters // Match space-characters
if (mode_ == ASCII) { if (mode_ == LATIN1) {
// One byte space characters are '\t'..'\r', ' ' and \u00a0. // One byte space characters are '\t'..'\r', ' ' and \u00a0.
Label success; Label success;
// Check for ' ' or 0x00a0. // Check for ' ' or 0x00a0.
@ -611,8 +611,8 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
return true; return true;
} }
case 'w': { case 'w': {
if (mode_ != ASCII) { if (mode_ != LATIN1) {
// Table is 128 entries, so all ASCII characters can be tested. // Table is 256 entries, so all Latin1 characters can be tested.
CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match); CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
} }
ExternalReference map = ExternalReference::re_word_character_map(); ExternalReference map = ExternalReference::re_word_character_map();
@ -623,8 +623,8 @@ bool RegExpMacroAssemblerARM64::CheckSpecialCharacterClass(uc16 type,
} }
case 'W': { case 'W': {
Label done; Label done;
if (mode_ != ASCII) { if (mode_ != LATIN1) {
// Table is 128 entries, so all ASCII characters can be tested. // Table is 256 entries, so all Latin1 characters can be tested.
__ Cmp(current_character(), 'z'); __ Cmp(current_character(), 'z');
__ B(hi, &done); __ B(hi, &done);
} }
@ -1315,7 +1315,7 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInput)); Handle<String> subject(frame_entry<String*>(re_frame, kInput));
// Current string. // Current string.
bool is_ascii = subject->IsOneByteRepresentationUnderneath(); bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
DCHECK(re_code->instruction_start() <= *return_address); DCHECK(re_code->instruction_start() <= *return_address);
DCHECK(*return_address <= DCHECK(*return_address <=
@ -1346,8 +1346,8 @@ int RegExpMacroAssemblerARM64::CheckStackGuardState(Address* return_address,
} }
// String might have changed. // String might have changed.
if (subject_tmp->IsOneByteRepresentation() != is_ascii) { if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
// If we changed between an ASCII and an UC16 string, the specialized // If we changed between an Latin1 and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from // code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code). // scratch (including, potentially, compiling a new version of the code).
return RETRY; return RETRY;
@ -1675,7 +1675,7 @@ void RegExpMacroAssemblerARM64::LoadCurrentCharacterUnchecked(int cp_offset,
offset = w10; offset = w10;
} }
if (mode_ == ASCII) { if (mode_ == LATIN1) {
if (characters == 4) { if (characters == 4) {
__ Ldr(current_character(), MemOperand(input_end(), offset, SXTW)); __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
} else if (characters == 2) { } else if (characters == 2) {

View File

@ -265,7 +265,7 @@ class RegExpMacroAssemblerARM64: public NativeRegExpMacroAssembler {
MacroAssembler* masm_; MacroAssembler* masm_;
// Which mode to generate code for (ASCII or UC16). // Which mode to generate code for (LATIN1 or UC16).
Mode mode_; Mode mode_;
// One greater than maximal register index actually used. // One greater than maximal register index actually used.

View File

@ -30,30 +30,28 @@ namespace internal {
// Helpers for colors. // Helpers for colors.
// Depending on your terminal configuration, the colour names may not match the #define COLOUR(colour_code) "\033[0;" colour_code "m"
// observed colours. #define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
#define COLOUR(colour_code) "\033[" colour_code "m"
#define BOLD(colour_code) "1;" colour_code
#define NORMAL "" #define NORMAL ""
#define GREY "30" #define GREY "30"
#define RED "31"
#define GREEN "32" #define GREEN "32"
#define ORANGE "33" #define YELLOW "33"
#define BLUE "34" #define BLUE "34"
#define PURPLE "35" #define MAGENTA "35"
#define INDIGO "36" #define CYAN "36"
#define WHITE "37" #define WHITE "37"
typedef char const * const TEXT_COLOUR; typedef char const * const TEXT_COLOUR;
TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : ""; TEXT_COLOUR clr_normal = FLAG_log_colour ? COLOUR(NORMAL) : "";
TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR(BOLD(GREY)) : ""; TEXT_COLOUR clr_flag_name = FLAG_log_colour ? COLOUR_BOLD(WHITE) : "";
TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(BOLD(WHITE)) : ""; TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : "";
TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR(BOLD(BLUE)) : ""; TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : "";
TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(BOLD(INDIGO)) : ""; TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : "";
TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : ""; TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : "";
TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(BOLD(PURPLE)) : ""; TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : "";
TEXT_COLOUR clr_memory_value = FLAG_log_colour ? COLOUR(BOLD(GREEN)) : ""; TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : "";
TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR(GREEN) : ""; TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : "";
TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : ""; TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : "";
TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(ORANGE) : "";
TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : ""; TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : "";
@ -337,7 +335,7 @@ uintptr_t Simulator::PopAddress() {
uintptr_t Simulator::StackLimit() const { uintptr_t Simulator::StackLimit() const {
// Leave a safety margin of 1024 bytes to prevent overrunning the stack when // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
// pushing values. // pushing values.
return reinterpret_cast<uintptr_t>(stack_limit_) + 1024; return stack_limit_ + 1024;
} }
@ -380,11 +378,11 @@ void Simulator::Init(FILE* stream) {
// Allocate and setup the simulator stack. // Allocate and setup the simulator stack.
stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_); stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
stack_ = new byte[stack_size_]; stack_ = reinterpret_cast<uintptr_t>(new byte[stack_size_]);
stack_limit_ = stack_ + stack_protection_size_; stack_limit_ = stack_ + stack_protection_size_;
byte* tos = stack_ + stack_size_ - stack_protection_size_; uintptr_t tos = stack_ + stack_size_ - stack_protection_size_;
// The stack pointer must be 16 bytes aligned. // The stack pointer must be 16-byte aligned.
set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL); set_sp(tos & ~0xfUL);
stream_ = stream; stream_ = stream;
print_disasm_ = new PrintDisassembler(stream_); print_disasm_ = new PrintDisassembler(stream_);
@ -420,7 +418,7 @@ void Simulator::ResetState() {
Simulator::~Simulator() { Simulator::~Simulator() {
delete[] stack_; delete[] reinterpret_cast<byte*>(stack_);
if (FLAG_log_instruction_stats) { if (FLAG_log_instruction_stats) {
delete instrument_; delete instrument_;
} }
@ -704,7 +702,7 @@ void Simulator::DoRuntimeCall(Instruction* instr) {
case ExternalReference::PROFILING_GETTER_CALL: { case ExternalReference::PROFILING_GETTER_CALL: {
// void f(Local<String> property, PropertyCallbackInfo& info, // void f(Local<String> property, PropertyCallbackInfo& info,
// AccessorGetterCallback callback) // AccessorNameGetterCallback callback)
TraceSim("Type: PROFILING_GETTER_CALL\n"); TraceSim("Type: PROFILING_GETTER_CALL\n");
SimulatorRuntimeProfilingGetterCall target = SimulatorRuntimeProfilingGetterCall target =
reinterpret_cast<SimulatorRuntimeProfilingGetterCall>( reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
@ -765,7 +763,12 @@ const char* Simulator::vreg_names[] = {
const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) { const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
STATIC_ASSERT(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1));
DCHECK(code < kNumberOfRegisters); DCHECK(code < kNumberOfRegisters);
// The modulo operator has no effect here, but it silences a broken GCC
// warning about out-of-bounds array accesses.
code %= kNumberOfRegisters;
// If the code represents the stack pointer, index the name after zr. // If the code represents the stack pointer, index the name after zr.
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
code = kZeroRegCode + 1; code = kZeroRegCode + 1;
@ -775,7 +778,10 @@ const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) { const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
STATIC_ASSERT(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1));
DCHECK(code < kNumberOfRegisters); DCHECK(code < kNumberOfRegisters);
code %= kNumberOfRegisters;
// If the code represents the stack pointer, index the name after zr. // If the code represents the stack pointer, index the name after zr.
if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) { if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
code = kZeroRegCode + 1; code = kZeroRegCode + 1;
@ -785,20 +791,23 @@ const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
const char* Simulator::SRegNameForCode(unsigned code) { const char* Simulator::SRegNameForCode(unsigned code) {
STATIC_ASSERT(arraysize(Simulator::sreg_names) == kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters); DCHECK(code < kNumberOfFPRegisters);
return sreg_names[code]; return sreg_names[code % kNumberOfFPRegisters];
} }
const char* Simulator::DRegNameForCode(unsigned code) { const char* Simulator::DRegNameForCode(unsigned code) {
STATIC_ASSERT(arraysize(Simulator::dreg_names) == kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters); DCHECK(code < kNumberOfFPRegisters);
return dreg_names[code]; return dreg_names[code % kNumberOfFPRegisters];
} }
const char* Simulator::VRegNameForCode(unsigned code) { const char* Simulator::VRegNameForCode(unsigned code) {
STATIC_ASSERT(arraysize(Simulator::vreg_names) == kNumberOfFPRegisters);
DCHECK(code < kNumberOfFPRegisters); DCHECK(code < kNumberOfFPRegisters);
return vreg_names[code]; return vreg_names[code % kNumberOfFPRegisters];
} }
@ -855,6 +864,7 @@ T Simulator::AddWithCarry(bool set_flags,
nzcv().SetZ(Z); nzcv().SetZ(Z);
nzcv().SetC(C); nzcv().SetC(C);
nzcv().SetV(V); nzcv().SetV(V);
LogSystemRegister(NZCV);
} }
return result; return result;
} }
@ -978,6 +988,7 @@ void Simulator::FPCompare(double val0, double val1) {
} else { } else {
UNREACHABLE(); UNREACHABLE();
} }
LogSystemRegister(NZCV);
} }
@ -1044,118 +1055,206 @@ void Simulator::PrintInstructionsAt(Instruction* start, uint64_t count) {
} }
void Simulator::PrintSystemRegisters(bool print_all) { void Simulator::PrintSystemRegisters() {
static bool first_run = true; PrintSystemRegister(NZCV);
PrintSystemRegister(FPCR);
}
static SimSystemRegister last_nzcv;
if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) { void Simulator::PrintRegisters() {
fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n", for (unsigned i = 0; i < kNumberOfRegisters; i++) {
clr_flag_name, PrintRegister(i);
clr_flag_value, }
}
void Simulator::PrintFPRegisters() {
for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
PrintFPRegister(i);
}
}
void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
// Don't print writes into xzr.
if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
return;
}
// The template is "# x<code>:value".
fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s\n",
clr_reg_name, XRegNameForCode(code, r31mode),
clr_reg_value, reg<uint64_t>(code, r31mode), clr_normal);
}
void Simulator::PrintFPRegister(unsigned code, PrintFPRegisterSizes sizes) {
// The template is "# v<code>:bits (d<code>:value, ...)".
DCHECK(sizes != 0);
DCHECK((sizes & kPrintAllFPRegValues) == sizes);
// Print the raw bits.
fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (",
clr_fpreg_name, VRegNameForCode(code),
clr_fpreg_value, fpreg<uint64_t>(code), clr_normal);
// Print all requested value interpretations.
bool need_separator = false;
if (sizes & kPrintDRegValue) {
fprintf(stream_, "%s%s%s: %s%g%s",
need_separator ? ", " : "",
clr_fpreg_name, DRegNameForCode(code),
clr_fpreg_value, fpreg<double>(code), clr_normal);
need_separator = true;
}
if (sizes & kPrintSRegValue) {
fprintf(stream_, "%s%s%s: %s%g%s",
need_separator ? ", " : "",
clr_fpreg_name, SRegNameForCode(code),
clr_fpreg_value, fpreg<float>(code), clr_normal);
need_separator = true;
}
// End the value list.
fprintf(stream_, ")\n");
}
void Simulator::PrintSystemRegister(SystemRegister id) {
switch (id) {
case NZCV:
fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
clr_flag_name, clr_flag_value,
nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(), nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
clr_normal); clr_normal);
} break;
last_nzcv = nzcv(); case FPCR: {
static SimSystemRegister last_fpcr;
if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
static const char * rmode[] = { static const char * rmode[] = {
"0b00 (Round to Nearest)", "0b00 (Round to Nearest)",
"0b01 (Round towards Plus Infinity)", "0b01 (Round towards Plus Infinity)",
"0b10 (Round towards Minus Infinity)", "0b10 (Round towards Minus Infinity)",
"0b11 (Round towards Zero)" "0b11 (Round towards Zero)"
}; };
DCHECK(fpcr().RMode() < ARRAY_SIZE(rmode)); DCHECK(fpcr().RMode() < arraysize(rmode));
fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n", fprintf(stream_,
clr_flag_name, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
clr_flag_value, clr_flag_name, clr_flag_value,
fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()], fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
clr_normal); clr_normal);
break;
}
default:
UNREACHABLE();
} }
last_fpcr = fpcr();
first_run = false;
} }
void Simulator::PrintRegisters(bool print_all_regs) { void Simulator::PrintRead(uintptr_t address,
static bool first_run = true; size_t size,
static int64_t last_regs[kNumberOfRegisters]; unsigned reg_code) {
USE(size); // Size is unused here.
for (unsigned i = 0; i < kNumberOfRegisters; i++) { // The template is "# x<code>:value <- address".
if (print_all_regs || first_run || fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
(last_regs[i] != xreg(i, Reg31IsStackPointer))) { clr_reg_name, XRegNameForCode(reg_code),
fprintf(stream_, clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
"# %s%4s:%s 0x%016" PRIx64 "%s\n",
clr_reg_name, fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
XRegNameForCode(i, Reg31IsStackPointer), clr_memory_address, address, clr_normal);
clr_reg_value,
xreg(i, Reg31IsStackPointer),
clr_normal);
}
// Cache the new register value so the next run can detect any changes.
last_regs[i] = xreg(i, Reg31IsStackPointer);
}
first_run = false;
} }
void Simulator::PrintFPRegisters(bool print_all_regs) { void Simulator::PrintReadFP(uintptr_t address,
static bool first_run = true; size_t size,
static uint64_t last_regs[kNumberOfFPRegisters]; unsigned reg_code) {
// The template is "# reg:bits (reg:value) <- address".
switch (size) {
case kSRegSize:
fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%gf%s)",
clr_fpreg_name, VRegNameForCode(reg_code),
clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
clr_fpreg_name, SRegNameForCode(reg_code),
clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
break;
case kDRegSize:
fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
clr_fpreg_name, VRegNameForCode(reg_code),
clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
clr_fpreg_name, DRegNameForCode(reg_code),
clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
break;
default:
UNREACHABLE();
}
// Print as many rows of registers as necessary, keeping each individual fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
// register in the same column each time (to make it easy to visually scan clr_memory_address, address, clr_normal);
// for changes).
for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
fprintf(stream_,
"# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
clr_fpreg_name,
VRegNameForCode(i),
clr_fpreg_value,
dreg_bits(i),
clr_normal,
clr_fpreg_name,
DRegNameForCode(i),
clr_fpreg_value,
dreg(i),
clr_fpreg_name,
SRegNameForCode(i),
clr_fpreg_value,
sreg(i),
clr_normal);
}
// Cache the new register value so the next run can detect any changes.
last_regs[i] = dreg_bits(i);
}
first_run = false;
} }
void Simulator::PrintProcessorState() { void Simulator::PrintWrite(uintptr_t address,
PrintSystemRegisters(); size_t size,
PrintRegisters(); unsigned reg_code) {
PrintFPRegisters(); // The template is "# reg:value -> address". To keep the trace tidy and
// readable, the value is aligned with the values in the register trace.
switch (size) {
case kByteSizeInBytes:
fprintf(stream_, "# %s%5s<7:0>: %s0x%02" PRIx8 "%s",
clr_reg_name, WRegNameForCode(reg_code),
clr_reg_value, reg<uint8_t>(reg_code), clr_normal);
break;
case kHalfWordSizeInBytes:
fprintf(stream_, "# %s%5s<15:0>: %s0x%04" PRIx16 "%s",
clr_reg_name, WRegNameForCode(reg_code),
clr_reg_value, reg<uint16_t>(reg_code), clr_normal);
break;
case kWRegSize:
fprintf(stream_, "# %s%5s: %s0x%08" PRIx32 "%s",
clr_reg_name, WRegNameForCode(reg_code),
clr_reg_value, reg<uint32_t>(reg_code), clr_normal);
break;
case kXRegSize:
fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
clr_reg_name, XRegNameForCode(reg_code),
clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
break;
default:
UNREACHABLE();
}
fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
clr_memory_address, address, clr_normal);
} }
void Simulator::PrintWrite(uint8_t* address, void Simulator::PrintWriteFP(uintptr_t address,
uint64_t value, size_t size,
unsigned num_bytes) { unsigned reg_code) {
// The template is "# value -> address". The template is not directly used // The template is "# reg:bits (reg:value) -> address". To keep the trace tidy
// in the printf since compilers tend to struggle with the parametrized // and readable, the value is aligned with the values in the register trace.
// width (%0*). switch (size) {
const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n"; case kSRegSize:
fprintf(stream_, fprintf(stream_, "# %s%5s<31:0>: %s0x%08" PRIx32 "%s (%s%s: %s%gf%s)",
format, clr_fpreg_name, VRegNameForCode(reg_code),
clr_memory_value, clr_fpreg_value, fpreg<uint32_t>(reg_code), clr_normal,
num_bytes * 2, // The width in hexa characters. clr_fpreg_name, SRegNameForCode(reg_code),
value, clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
clr_normal, break;
clr_memory_address, case kDRegSize:
address, fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
clr_normal); clr_fpreg_name, VRegNameForCode(reg_code),
clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
clr_fpreg_name, DRegNameForCode(reg_code),
clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
break;
default:
UNREACHABLE();
}
fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
clr_memory_address, address, clr_normal);
} }
@ -1384,6 +1483,7 @@ void Simulator::LogicalHelper(Instruction* instr, T op2) {
nzcv().SetZ(CalcZFlag(result)); nzcv().SetZ(CalcZFlag(result));
nzcv().SetC(0); nzcv().SetC(0);
nzcv().SetV(0); nzcv().SetV(0);
LogSystemRegister(NZCV);
} }
set_reg<T>(instr->Rd(), result, instr->RdMode()); set_reg<T>(instr->Rd(), result, instr->RdMode());
@ -1424,6 +1524,7 @@ void Simulator::ConditionalCompareHelper(Instruction* instr, T op2) {
} else { } else {
// If the condition fails, set the status flags to the nzcv immediate. // If the condition fails, set the status flags to the nzcv immediate.
nzcv().SetFlags(instr->Nzcv()); nzcv().SetFlags(instr->Nzcv());
LogSystemRegister(NZCV);
} }
} }
@ -1464,9 +1565,8 @@ void Simulator::LoadStoreHelper(Instruction* instr,
AddrMode addrmode) { AddrMode addrmode) {
unsigned srcdst = instr->Rt(); unsigned srcdst = instr->Rt();
unsigned addr_reg = instr->Rn(); unsigned addr_reg = instr->Rn();
uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode); uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
int num_bytes = 1 << instr->SizeLS(); uintptr_t stack = 0;
uint8_t* stack = NULL;
// Handle the writeback for stores before the store. On a CPU the writeback // Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible // and the store are atomic, but when running on the simulator it is possible
@ -1480,46 +1580,52 @@ void Simulator::LoadStoreHelper(Instruction* instr,
// For store the address post writeback is used to check access below the // For store the address post writeback is used to check access below the
// stack. // stack.
stack = reinterpret_cast<uint8_t*>(sp()); stack = sp();
} }
LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask)); LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
switch (op) { switch (op) {
case LDRB_w: // Use _no_log variants to suppress the register trace (LOG_REGS,
case LDRH_w: // LOG_FP_REGS). We will print a more detailed log.
case LDR_w: case LDRB_w: set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address)); break;
case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break; case LDRH_w: set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address)); break;
case STRB_w: case LDR_w: set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address)); break;
case STRH_w: case LDR_x: set_xreg_no_log(srcdst, MemoryRead<uint64_t>(address)); break;
case STR_w: case LDRSB_w: set_wreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break; case LDRSH_w: set_wreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
case LDRSB_w: { case LDRSB_x: set_xreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead8(address), SXTB)); case LDRSH_x: set_xreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
break; case LDRSW_x: set_xreg_no_log(srcdst, MemoryRead<int32_t>(address)); break;
} case LDR_s: set_sreg_no_log(srcdst, MemoryRead<float>(address)); break;
case LDRSB_x: { case LDR_d: set_dreg_no_log(srcdst, MemoryRead<double>(address)); break;
set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead8(address), SXTB));
break; case STRB_w: MemoryWrite<uint8_t>(address, wreg(srcdst)); break;
} case STRH_w: MemoryWrite<uint16_t>(address, wreg(srcdst)); break;
case LDRSH_w: { case STR_w: MemoryWrite<uint32_t>(address, wreg(srcdst)); break;
set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead16(address), SXTH)); case STR_x: MemoryWrite<uint64_t>(address, xreg(srcdst)); break;
break; case STR_s: MemoryWrite<float>(address, sreg(srcdst)); break;
} case STR_d: MemoryWrite<double>(address, dreg(srcdst)); break;
case LDRSH_x: {
set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead16(address), SXTH));
break;
}
case LDRSW_x: {
set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
break;
}
case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
default: UNIMPLEMENTED(); default: UNIMPLEMENTED();
} }
// Print a detailed trace (including the memory address) instead of the basic
// register:value trace generated by set_*reg().
size_t access_size = 1 << instr->SizeLS();
if (instr->IsLoad()) {
if ((op == LDR_s) || (op == LDR_d)) {
LogReadFP(address, access_size, srcdst);
} else {
LogRead(address, access_size, srcdst);
}
} else {
if ((op == STR_s) || (op == STR_d)) {
LogWriteFP(address, access_size, srcdst);
} else {
LogWrite(address, access_size, srcdst);
}
}
// Handle the writeback for loads after the load to ensure safe pop // Handle the writeback for loads after the load to ensure safe pop
// operation even when interrupted in the middle of it. The stack pointer // operation even when interrupted in the middle of it. The stack pointer
// is only updated after the load so pop(fp) will never break the invariant // is only updated after the load so pop(fp) will never break the invariant
@ -1527,7 +1633,7 @@ void Simulator::LoadStoreHelper(Instruction* instr,
if (instr->IsLoad()) { if (instr->IsLoad()) {
// For loads the address pre writeback is used to check access below the // For loads the address pre writeback is used to check access below the
// stack. // stack.
stack = reinterpret_cast<uint8_t*>(sp()); stack = sp();
LoadStoreWriteBack(addr_reg, offset, addrmode); LoadStoreWriteBack(addr_reg, offset, addrmode);
} }
@ -1563,9 +1669,11 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
unsigned rt = instr->Rt(); unsigned rt = instr->Rt();
unsigned rt2 = instr->Rt2(); unsigned rt2 = instr->Rt2();
unsigned addr_reg = instr->Rn(); unsigned addr_reg = instr->Rn();
int offset = instr->ImmLSPair() << instr->SizeLSPair(); size_t access_size = 1 << instr->SizeLSPair();
uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode); int64_t offset = instr->ImmLSPair() * access_size;
uint8_t* stack = NULL; uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
uintptr_t address2 = address + access_size;
uintptr_t stack = 0;
// Handle the writeback for stores before the store. On a CPU the writeback // Handle the writeback for stores before the store. On a CPU the writeback
// and the store are atomic, but when running on the simulator it is possible // and the store are atomic, but when running on the simulator it is possible
@ -1579,7 +1687,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
// For store the address post writeback is used to check access below the // For store the address post writeback is used to check access below the
// stack. // stack.
stack = reinterpret_cast<uint8_t*>(sp()); stack = sp();
} }
LoadStorePairOp op = LoadStorePairOp op =
@ -1589,55 +1697,85 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2)); DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2));
switch (op) { switch (op) {
// Use _no_log variants to suppress the register trace (LOG_REGS,
// LOG_FP_REGS). We will print a more detailed log.
case LDP_w: { case LDP_w: {
set_wreg(rt, MemoryRead32(address)); DCHECK(access_size == kWRegSize);
set_wreg(rt2, MemoryRead32(address + kWRegSize)); set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
set_wreg_no_log(rt2, MemoryRead<uint32_t>(address2));
break; break;
} }
case LDP_s: { case LDP_s: {
set_sreg(rt, MemoryReadFP32(address)); DCHECK(access_size == kSRegSize);
set_sreg(rt2, MemoryReadFP32(address + kSRegSize)); set_sreg_no_log(rt, MemoryRead<float>(address));
set_sreg_no_log(rt2, MemoryRead<float>(address2));
break; break;
} }
case LDP_x: { case LDP_x: {
set_xreg(rt, MemoryRead64(address)); DCHECK(access_size == kXRegSize);
set_xreg(rt2, MemoryRead64(address + kXRegSize)); set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
set_xreg_no_log(rt2, MemoryRead<uint64_t>(address2));
break; break;
} }
case LDP_d: { case LDP_d: {
set_dreg(rt, MemoryReadFP64(address)); DCHECK(access_size == kDRegSize);
set_dreg(rt2, MemoryReadFP64(address + kDRegSize)); set_dreg_no_log(rt, MemoryRead<double>(address));
set_dreg_no_log(rt2, MemoryRead<double>(address2));
break; break;
} }
case LDPSW_x: { case LDPSW_x: {
set_xreg(rt, ExtendValue<int64_t>(MemoryRead32(address), SXTW)); DCHECK(access_size == kWRegSize);
set_xreg(rt2, ExtendValue<int64_t>( set_xreg_no_log(rt, MemoryRead<int32_t>(address));
MemoryRead32(address + kWRegSize), SXTW)); set_xreg_no_log(rt2, MemoryRead<int32_t>(address2));
break; break;
} }
case STP_w: { case STP_w: {
MemoryWrite32(address, wreg(rt)); DCHECK(access_size == kWRegSize);
MemoryWrite32(address + kWRegSize, wreg(rt2)); MemoryWrite<uint32_t>(address, wreg(rt));
MemoryWrite<uint32_t>(address2, wreg(rt2));
break; break;
} }
case STP_s: { case STP_s: {
MemoryWriteFP32(address, sreg(rt)); DCHECK(access_size == kSRegSize);
MemoryWriteFP32(address + kSRegSize, sreg(rt2)); MemoryWrite<float>(address, sreg(rt));
MemoryWrite<float>(address2, sreg(rt2));
break; break;
} }
case STP_x: { case STP_x: {
MemoryWrite64(address, xreg(rt)); DCHECK(access_size == kXRegSize);
MemoryWrite64(address + kXRegSize, xreg(rt2)); MemoryWrite<uint64_t>(address, xreg(rt));
MemoryWrite<uint64_t>(address2, xreg(rt2));
break; break;
} }
case STP_d: { case STP_d: {
MemoryWriteFP64(address, dreg(rt)); DCHECK(access_size == kDRegSize);
MemoryWriteFP64(address + kDRegSize, dreg(rt2)); MemoryWrite<double>(address, dreg(rt));
MemoryWrite<double>(address2, dreg(rt2));
break; break;
} }
default: UNREACHABLE(); default: UNREACHABLE();
} }
// Print a detailed trace (including the memory address) instead of the basic
// register:value trace generated by set_*reg().
if (instr->IsLoad()) {
if ((op == LDP_s) || (op == LDP_d)) {
LogReadFP(address, access_size, rt);
LogReadFP(address2, access_size, rt2);
} else {
LogRead(address, access_size, rt);
LogRead(address2, access_size, rt2);
}
} else {
if ((op == STP_s) || (op == STP_d)) {
LogWriteFP(address, access_size, rt);
LogWriteFP(address2, access_size, rt2);
} else {
LogWrite(address, access_size, rt);
LogWrite(address2, access_size, rt2);
}
}
// Handle the writeback for loads after the load to ensure safe pop // Handle the writeback for loads after the load to ensure safe pop
// operation even when interrupted in the middle of it. The stack pointer // operation even when interrupted in the middle of it. The stack pointer
// is only updated after the load so pop(fp) will never break the invariant // is only updated after the load so pop(fp) will never break the invariant
@ -1645,7 +1783,7 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
if (instr->IsLoad()) { if (instr->IsLoad()) {
// For loads the address pre writeback is used to check access below the // For loads the address pre writeback is used to check access below the
// stack. // stack.
stack = reinterpret_cast<uint8_t*>(sp()); stack = sp();
LoadStoreWriteBack(addr_reg, offset, addrmode); LoadStoreWriteBack(addr_reg, offset, addrmode);
} }
@ -1657,24 +1795,37 @@ void Simulator::LoadStorePairHelper(Instruction* instr,
void Simulator::VisitLoadLiteral(Instruction* instr) { void Simulator::VisitLoadLiteral(Instruction* instr) {
uint8_t* address = instr->LiteralAddress(); uintptr_t address = instr->LiteralAddress();
unsigned rt = instr->Rt(); unsigned rt = instr->Rt();
switch (instr->Mask(LoadLiteralMask)) { switch (instr->Mask(LoadLiteralMask)) {
case LDR_w_lit: set_wreg(rt, MemoryRead32(address)); break; // Use _no_log variants to suppress the register trace (LOG_REGS,
case LDR_x_lit: set_xreg(rt, MemoryRead64(address)); break; // LOG_FP_REGS), then print a more detailed log.
case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address)); break; case LDR_w_lit:
case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address)); break; set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
LogRead(address, kWRegSize, rt);
break;
case LDR_x_lit:
set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
LogRead(address, kXRegSize, rt);
break;
case LDR_s_lit:
set_sreg_no_log(rt, MemoryRead<float>(address));
LogReadFP(address, kSRegSize, rt);
break;
case LDR_d_lit:
set_dreg_no_log(rt, MemoryRead<double>(address));
LogReadFP(address, kDRegSize, rt);
break;
default: UNREACHABLE(); default: UNREACHABLE();
} }
} }
uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg, uintptr_t Simulator::LoadStoreAddress(unsigned addr_reg, int64_t offset,
int64_t offset,
AddrMode addrmode) { AddrMode addrmode) {
const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask; const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
int64_t address = xreg(addr_reg, Reg31IsStackPointer); uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) { if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
// When the base register is SP the stack pointer is required to be // When the base register is SP the stack pointer is required to be
// quadword aligned prior to the address calculation and write-backs. // quadword aligned prior to the address calculation and write-backs.
@ -1686,7 +1837,7 @@ uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
address += offset; address += offset;
} }
return reinterpret_cast<uint8_t*>(address); return address;
} }
@ -1701,88 +1852,21 @@ void Simulator::LoadStoreWriteBack(unsigned addr_reg,
} }
void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) { void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
if ((address >= stack_limit_) && (address < stack)) { if ((address >= stack_limit_) && (address < stack)) {
fprintf(stream_, "ACCESS BELOW STACK POINTER:\n"); fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
fprintf(stream_, " sp is here: 0x%16p\n", stack); fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n",
fprintf(stream_, " access was here: 0x%16p\n", address); static_cast<uint64_t>(stack));
fprintf(stream_, " stack limit is here: 0x%16p\n", stack_limit_); fprintf(stream_, " access was here: 0x%016" PRIx64 "\n",
static_cast<uint64_t>(address));
fprintf(stream_, " stack limit is here: 0x%016" PRIx64 "\n",
static_cast<uint64_t>(stack_limit_));
fprintf(stream_, "\n"); fprintf(stream_, "\n");
FATAL("ACCESS BELOW STACK POINTER"); FATAL("ACCESS BELOW STACK POINTER");
} }
} }
uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
DCHECK(address != NULL);
DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
uint64_t read = 0;
memcpy(&read, address, num_bytes);
return read;
}
uint8_t Simulator::MemoryRead8(uint8_t* address) {
return MemoryRead(address, sizeof(uint8_t));
}
uint16_t Simulator::MemoryRead16(uint8_t* address) {
return MemoryRead(address, sizeof(uint16_t));
}
uint32_t Simulator::MemoryRead32(uint8_t* address) {
return MemoryRead(address, sizeof(uint32_t));
}
float Simulator::MemoryReadFP32(uint8_t* address) {
return rawbits_to_float(MemoryRead32(address));
}
uint64_t Simulator::MemoryRead64(uint8_t* address) {
return MemoryRead(address, sizeof(uint64_t));
}
double Simulator::MemoryReadFP64(uint8_t* address) {
return rawbits_to_double(MemoryRead64(address));
}
void Simulator::MemoryWrite(uint8_t* address,
uint64_t value,
unsigned num_bytes) {
DCHECK(address != NULL);
DCHECK((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
LogWrite(address, value, num_bytes);
memcpy(address, &value, num_bytes);
}
void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
MemoryWrite(address, value, sizeof(uint32_t));
}
void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
MemoryWrite32(address, float_to_rawbits(value));
}
void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
MemoryWrite(address, value, sizeof(uint64_t));
}
void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
MemoryWrite64(address, double_to_rawbits(value));
}
void Simulator::VisitMoveWideImmediate(Instruction* instr) { void Simulator::VisitMoveWideImmediate(Instruction* instr) {
MoveWideImmediateOp mov_op = MoveWideImmediateOp mov_op =
static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask)); static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
@ -2331,6 +2415,7 @@ void Simulator::VisitFPConditionalCompare(Instruction* instr) {
} else { } else {
// If the condition fails, set the status flags to the nzcv immediate. // If the condition fails, set the status flags to the nzcv immediate.
nzcv().SetFlags(instr->Nzcv()); nzcv().SetFlags(instr->Nzcv());
LogSystemRegister(NZCV);
} }
break; break;
} }
@ -3113,8 +3198,14 @@ void Simulator::VisitSystem(Instruction* instr) {
} }
case MSR: { case MSR: {
switch (instr->ImmSystemRegister()) { switch (instr->ImmSystemRegister()) {
case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break; case NZCV:
case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break; nzcv().SetRawValue(xreg(instr->Rt()));
LogSystemRegister(NZCV);
break;
case FPCR:
fpcr().SetRawValue(xreg(instr->Rt()));
LogSystemRegister(FPCR);
break;
default: UNIMPLEMENTED(); default: UNIMPLEMENTED();
} }
break; break;
@ -3325,8 +3416,8 @@ void Simulator::Debug() {
} else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) { } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
if (argc == 2) { if (argc == 2) {
if (strcmp(arg1, "all") == 0) { if (strcmp(arg1, "all") == 0) {
PrintRegisters(true); PrintRegisters();
PrintFPRegisters(true); PrintFPRegisters();
} else { } else {
if (!PrintValue(arg1)) { if (!PrintValue(arg1)) {
PrintF("%s unrecognized\n", arg1); PrintF("%s unrecognized\n", arg1);
@ -3530,7 +3621,7 @@ void Simulator::VisitException(Instruction* instr) {
if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) { if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
if (message != NULL) { if (message != NULL) {
PrintF(stream_, PrintF(stream_,
"%sDebugger hit %d: %s%s%s\n", "# %sDebugger hit %d: %s%s%s\n",
clr_debug_number, clr_debug_number,
code, code,
clr_debug_message, clr_debug_message,
@ -3538,7 +3629,7 @@ void Simulator::VisitException(Instruction* instr) {
clr_normal); clr_normal);
} else { } else {
PrintF(stream_, PrintF(stream_,
"%sDebugger hit %d.%s\n", "# %sDebugger hit %d.%s\n",
clr_debug_number, clr_debug_number,
code, code,
clr_normal); clr_normal);
@ -3565,9 +3656,9 @@ void Simulator::VisitException(Instruction* instr) {
// Don't print information that is already being traced. // Don't print information that is already being traced.
parameters &= ~log_parameters(); parameters &= ~log_parameters();
// Print the requested information. // Print the requested information.
if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true); if (parameters & LOG_SYS_REGS) PrintSystemRegisters();
if (parameters & LOG_REGS) PrintRegisters(true); if (parameters & LOG_REGS) PrintRegisters();
if (parameters & LOG_FP_REGS) PrintFPRegisters(true); if (parameters & LOG_FP_REGS) PrintFPRegisters();
} }
// The stop parameters are inlined in the code. Skip them: // The stop parameters are inlined in the code. Skip them:

View File

@ -312,7 +312,6 @@ class Simulator : public DecoderVisitor {
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize)); DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
CheckBreakNext(); CheckBreakNext();
Decode(pc_); Decode(pc_);
LogProcessorState();
increment_pc(); increment_pc();
CheckBreakpoints(); CheckBreakpoints();
} }
@ -348,16 +347,13 @@ class Simulator : public DecoderVisitor {
return reg<int64_t>(code, r31mode); return reg<int64_t>(code, r31mode);
} }
// Write 'size' bits of 'value' into an integer register. The value is // Write 'value' into an integer register. The value is zero-extended. This
// zero-extended. This behaviour matches AArch64 register writes. // behaviour matches AArch64 register writes.
// Like set_reg(), but infer the access size from the template type.
template<typename T> template<typename T>
void set_reg(unsigned code, T value, void set_reg(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) { Reg31Mode r31mode = Reg31IsZeroRegister) {
DCHECK(code < kNumberOfRegisters); set_reg_no_log(code, value, r31mode);
if (!IsZeroRegister(code, r31mode)) LogRegister(code, r31mode);
registers_[code].Set(value);
} }
// Common specialized accessors for the set_reg() template. // Common specialized accessors for the set_reg() template.
@ -371,6 +367,26 @@ class Simulator : public DecoderVisitor {
set_reg(code, value, r31mode); set_reg(code, value, r31mode);
} }
// As above, but don't automatically log the register update.
template <typename T>
void set_reg_no_log(unsigned code, T value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
DCHECK(code < kNumberOfRegisters);
if (!IsZeroRegister(code, r31mode)) {
registers_[code].Set(value);
}
}
void set_wreg_no_log(unsigned code, int32_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg_no_log(code, value, r31mode);
}
void set_xreg_no_log(unsigned code, int64_t value,
Reg31Mode r31mode = Reg31IsZeroRegister) {
set_reg_no_log(code, value, r31mode);
}
// Commonly-used special cases. // Commonly-used special cases.
template<typename T> template<typename T>
void set_lr(T value) { void set_lr(T value) {
@ -430,9 +446,13 @@ class Simulator : public DecoderVisitor {
// This behaviour matches AArch64 register writes. // This behaviour matches AArch64 register writes.
template<typename T> template<typename T>
void set_fpreg(unsigned code, T value) { void set_fpreg(unsigned code, T value) {
DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize)); set_fpreg_no_log(code, value);
DCHECK(code < kNumberOfFPRegisters);
fpregisters_[code].Set(value); if (sizeof(value) <= kSRegSize) {
LogFPRegister(code, kPrintSRegValue);
} else {
LogFPRegister(code, kPrintDRegValue);
}
} }
// Common specialized accessors for the set_fpreg() template. // Common specialized accessors for the set_fpreg() template.
@ -452,6 +472,22 @@ class Simulator : public DecoderVisitor {
set_fpreg(code, value); set_fpreg(code, value);
} }
// As above, but don't automatically log the register update.
template <typename T>
void set_fpreg_no_log(unsigned code, T value) {
DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
DCHECK(code < kNumberOfFPRegisters);
fpregisters_[code].Set(value);
}
void set_sreg_no_log(unsigned code, float value) {
set_fpreg_no_log(code, value);
}
void set_dreg_no_log(unsigned code, double value) {
set_fpreg_no_log(code, value);
}
SimSystemRegister& nzcv() { return nzcv_; } SimSystemRegister& nzcv() { return nzcv_; }
SimSystemRegister& fpcr() { return fpcr_; } SimSystemRegister& fpcr() { return fpcr_; }
@ -478,27 +514,68 @@ class Simulator : public DecoderVisitor {
// Disassemble instruction at the given address. // Disassemble instruction at the given address.
void PrintInstructionsAt(Instruction* pc, uint64_t count); void PrintInstructionsAt(Instruction* pc, uint64_t count);
void PrintSystemRegisters(bool print_all = false); // Print all registers of the specified types.
void PrintRegisters(bool print_all_regs = false); void PrintRegisters();
void PrintFPRegisters(bool print_all_regs = false); void PrintFPRegisters();
void PrintProcessorState(); void PrintSystemRegisters();
void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
// Like Print* (above), but respect log_parameters().
void LogSystemRegisters() { void LogSystemRegisters() {
if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters(); if (log_parameters() & LOG_SYS_REGS) PrintSystemRegisters();
} }
void LogRegisters() { void LogRegisters() {
if (log_parameters_ & LOG_REGS) PrintRegisters(); if (log_parameters() & LOG_REGS) PrintRegisters();
} }
void LogFPRegisters() { void LogFPRegisters() {
if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters(); if (log_parameters() & LOG_FP_REGS) PrintFPRegisters();
} }
void LogProcessorState() {
LogSystemRegisters(); // Specify relevant register sizes, for PrintFPRegister.
LogRegisters(); //
LogFPRegisters(); // These values are bit masks; they can be combined in case multiple views of
// a machine register are interesting.
enum PrintFPRegisterSizes {
kPrintDRegValue = 1 << kDRegSize,
kPrintSRegValue = 1 << kSRegSize,
kPrintAllFPRegValues = kPrintDRegValue | kPrintSRegValue
};
// Print individual register values (after update).
void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer);
void PrintFPRegister(unsigned code,
PrintFPRegisterSizes sizes = kPrintAllFPRegValues);
void PrintSystemRegister(SystemRegister id);
// Like Print* (above), but respect log_parameters().
void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) {
if (log_parameters() & LOG_REGS) PrintRegister(code, r31mode);
} }
void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) { void LogFPRegister(unsigned code,
if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes); PrintFPRegisterSizes sizes = kPrintAllFPRegValues) {
if (log_parameters() & LOG_FP_REGS) PrintFPRegister(code, sizes);
}
void LogSystemRegister(SystemRegister id) {
if (log_parameters() & LOG_SYS_REGS) PrintSystemRegister(id);
}
// Print memory accesses.
void PrintRead(uintptr_t address, size_t size, unsigned reg_code);
void PrintReadFP(uintptr_t address, size_t size, unsigned reg_code);
void PrintWrite(uintptr_t address, size_t size, unsigned reg_code);
void PrintWriteFP(uintptr_t address, size_t size, unsigned reg_code);
// Like Print* (above), but respect log_parameters().
void LogRead(uintptr_t address, size_t size, unsigned reg_code) {
if (log_parameters() & LOG_REGS) PrintRead(address, size, reg_code);
}
void LogReadFP(uintptr_t address, size_t size, unsigned reg_code) {
if (log_parameters() & LOG_FP_REGS) PrintReadFP(address, size, reg_code);
}
void LogWrite(uintptr_t address, size_t size, unsigned reg_code) {
if (log_parameters() & LOG_WRITE) PrintWrite(address, size, reg_code);
}
void LogWriteFP(uintptr_t address, size_t size, unsigned reg_code) {
if (log_parameters() & LOG_WRITE) PrintWriteFP(address, size, reg_code);
} }
int log_parameters() { return log_parameters_; } int log_parameters() { return log_parameters_; }
@ -589,28 +666,30 @@ class Simulator : public DecoderVisitor {
int64_t offset, int64_t offset,
AddrMode addrmode); AddrMode addrmode);
void LoadStorePairHelper(Instruction* instr, AddrMode addrmode); void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
uint8_t* LoadStoreAddress(unsigned addr_reg, uintptr_t LoadStoreAddress(unsigned addr_reg, int64_t offset,
int64_t offset,
AddrMode addrmode); AddrMode addrmode);
void LoadStoreWriteBack(unsigned addr_reg, void LoadStoreWriteBack(unsigned addr_reg,
int64_t offset, int64_t offset,
AddrMode addrmode); AddrMode addrmode);
void CheckMemoryAccess(uint8_t* address, uint8_t* stack); void CheckMemoryAccess(uintptr_t address, uintptr_t stack);
uint64_t MemoryRead(uint8_t* address, unsigned num_bytes); // Memory read helpers.
uint8_t MemoryRead8(uint8_t* address); template <typename T, typename A>
uint16_t MemoryRead16(uint8_t* address); T MemoryRead(A address) {
uint32_t MemoryRead32(uint8_t* address); T value;
float MemoryReadFP32(uint8_t* address); STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
uint64_t MemoryRead64(uint8_t* address); (sizeof(value) == 4) || (sizeof(value) == 8));
double MemoryReadFP64(uint8_t* address); memcpy(&value, reinterpret_cast<const void*>(address), sizeof(value));
return value;
void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes); }
void MemoryWrite32(uint8_t* address, uint32_t value);
void MemoryWriteFP32(uint8_t* address, float value);
void MemoryWrite64(uint8_t* address, uint64_t value);
void MemoryWriteFP64(uint8_t* address, double value);
// Memory write helpers.
template <typename T, typename A>
void MemoryWrite(A address, T value) {
STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
(sizeof(value) == 4) || (sizeof(value) == 8));
memcpy(reinterpret_cast<void*>(address), &value, sizeof(value));
}
template <typename T> template <typename T>
T ShiftOperand(T value, T ShiftOperand(T value,
@ -763,10 +842,10 @@ class Simulator : public DecoderVisitor {
static const uint32_t kConditionFlagsMask = 0xf0000000; static const uint32_t kConditionFlagsMask = 0xf0000000;
// Stack // Stack
byte* stack_; uintptr_t stack_;
static const intptr_t stack_protection_size_ = KB; static const size_t stack_protection_size_ = KB;
intptr_t stack_size_; size_t stack_size_;
byte* stack_limit_; uintptr_t stack_limit_;
Decoder<DispatchingDecoderVisitor>* decoder_; Decoder<DispatchingDecoderVisitor>* decoder_;
Decoder<DispatchingDecoderVisitor>* disassembler_decoder_; Decoder<DispatchingDecoderVisitor>* disassembler_decoder_;

View File

@ -50,7 +50,7 @@ function ArrayIteratorIterator() {
function ArrayIteratorNext() { function ArrayIteratorNext() {
var iterator = ToObject(this); var iterator = ToObject(this);
if (!HAS_PRIVATE(iterator, arrayIteratorObjectSymbol)) { if (!HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
throw MakeTypeError('incompatible_method_receiver', throw MakeTypeError('incompatible_method_receiver',
['Array Iterator.prototype.next']); ['Array Iterator.prototype.next']);
} }

29
deps/v8/src/array.js vendored
View File

@ -144,7 +144,7 @@ function Join(array, length, separator, convert) {
elements[elements_length++] = e; elements[elements_length++] = e;
} }
elements.length = elements_length; elements.length = elements_length;
var result = %_FastAsciiArrayJoin(elements, ''); var result = %_FastOneByteArrayJoin(elements, '');
if (!IS_UNDEFINED(result)) return result; if (!IS_UNDEFINED(result)) return result;
return %StringBuilderConcat(elements, elements_length, ''); return %StringBuilderConcat(elements, elements_length, '');
} }
@ -168,7 +168,7 @@ function Join(array, length, separator, convert) {
elements[i] = e; elements[i] = e;
} }
} }
var result = %_FastAsciiArrayJoin(elements, separator); var result = %_FastOneByteArrayJoin(elements, separator);
if (!IS_UNDEFINED(result)) return result; if (!IS_UNDEFINED(result)) return result;
return %StringBuilderJoin(elements, length, separator); return %StringBuilderJoin(elements, length, separator);
@ -375,7 +375,7 @@ function ArrayJoin(separator) {
separator = NonStringToString(separator); separator = NonStringToString(separator);
} }
var result = %_FastAsciiArrayJoin(array, separator); var result = %_FastOneByteArrayJoin(array, separator);
if (!IS_UNDEFINED(result)) return result; if (!IS_UNDEFINED(result)) return result;
return Join(array, length, separator, ConvertToString); return Join(array, length, separator, ConvertToString);
@ -863,11 +863,12 @@ function ArraySort(comparefn) {
var t_array = []; var t_array = [];
// Use both 'from' and 'to' to determine the pivot candidates. // Use both 'from' and 'to' to determine the pivot candidates.
var increment = 200 + ((to - from) & 15); var increment = 200 + ((to - from) & 15);
for (var i = from + 1; i < to - 1; i += increment) { for (var i = from + 1, j = 0; i < to - 1; i += increment, j++) {
t_array.push([i, a[i]]); t_array[j] = [i, a[i]];
} }
t_array.sort(function(a, b) { %_CallFunction(t_array, function(a, b) {
return %_CallFunction(receiver, a[1], b[1], comparefn) } ); return %_CallFunction(receiver, a[1], b[1], comparefn);
}, ArraySort);
var third_index = t_array[t_array.length >> 1][0]; var third_index = t_array[t_array.length >> 1][0];
return third_index; return third_index;
} }
@ -969,7 +970,7 @@ function ArraySort(comparefn) {
// It's an interval. // It's an interval.
var proto_length = indices; var proto_length = indices;
for (var i = 0; i < proto_length; i++) { for (var i = 0; i < proto_length; i++) {
if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) { if (!HAS_OWN_PROPERTY(obj, i) && HAS_OWN_PROPERTY(proto, i)) {
obj[i] = proto[i]; obj[i] = proto[i];
if (i >= max) { max = i + 1; } if (i >= max) { max = i + 1; }
} }
@ -977,8 +978,8 @@ function ArraySort(comparefn) {
} else { } else {
for (var i = 0; i < indices.length; i++) { for (var i = 0; i < indices.length; i++) {
var index = indices[i]; var index = indices[i];
if (!IS_UNDEFINED(index) && if (!IS_UNDEFINED(index) && !HAS_OWN_PROPERTY(obj, index)
!obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) { && HAS_OWN_PROPERTY(proto, index)) {
obj[index] = proto[index]; obj[index] = proto[index];
if (index >= max) { max = index + 1; } if (index >= max) { max = index + 1; }
} }
@ -998,7 +999,7 @@ function ArraySort(comparefn) {
// It's an interval. // It's an interval.
var proto_length = indices; var proto_length = indices;
for (var i = from; i < proto_length; i++) { for (var i = from; i < proto_length; i++) {
if (proto.hasOwnProperty(i)) { if (HAS_OWN_PROPERTY(proto, i)) {
obj[i] = UNDEFINED; obj[i] = UNDEFINED;
} }
} }
@ -1006,7 +1007,7 @@ function ArraySort(comparefn) {
for (var i = 0; i < indices.length; i++) { for (var i = 0; i < indices.length; i++) {
var index = indices[i]; var index = indices[i];
if (!IS_UNDEFINED(index) && from <= index && if (!IS_UNDEFINED(index) && from <= index &&
proto.hasOwnProperty(index)) { HAS_OWN_PROPERTY(proto, index)) {
obj[index] = UNDEFINED; obj[index] = UNDEFINED;
} }
} }
@ -1029,14 +1030,14 @@ function ArraySort(comparefn) {
} }
// Maintain the invariant num_holes = the number of holes in the original // Maintain the invariant num_holes = the number of holes in the original
// array with indices <= first_undefined or > last_defined. // array with indices <= first_undefined or > last_defined.
if (!obj.hasOwnProperty(first_undefined)) { if (!HAS_OWN_PROPERTY(obj, first_undefined)) {
num_holes++; num_holes++;
} }
// Find last defined element. // Find last defined element.
while (first_undefined < last_defined && while (first_undefined < last_defined &&
IS_UNDEFINED(obj[last_defined])) { IS_UNDEFINED(obj[last_defined])) {
if (!obj.hasOwnProperty(last_defined)) { if (!HAS_OWN_PROPERTY(obj, last_defined)) {
num_holes++; num_holes++;
} }
last_defined--; last_defined--;

View File

@ -40,19 +40,20 @@
#include "src/base/lazy-instance.h" #include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h" #include "src/base/platform/platform.h"
#include "src/builtins.h" #include "src/builtins.h"
#include "src/codegen.h"
#include "src/counters.h" #include "src/counters.h"
#include "src/cpu-profiler.h" #include "src/cpu-profiler.h"
#include "src/debug.h" #include "src/debug.h"
#include "src/deoptimizer.h" #include "src/deoptimizer.h"
#include "src/execution.h" #include "src/execution.h"
#include "src/ic.h" #include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/isolate-inl.h" #include "src/isolate-inl.h"
#include "src/jsregexp.h" #include "src/jsregexp.h"
#include "src/regexp-macro-assembler.h" #include "src/regexp-macro-assembler.h"
#include "src/regexp-stack.h" #include "src/regexp-stack.h"
#include "src/runtime.h" #include "src/runtime/runtime.h"
#include "src/serialize.h" #include "src/serialize.h"
#include "src/stub-cache.h"
#include "src/token.h" #include "src/token.h"
#if V8_TARGET_ARCH_IA32 #if V8_TARGET_ARCH_IA32
@ -882,7 +883,7 @@ void ExternalReference::SetUp() {
double_constants.one_half = 0.5; double_constants.one_half = 0.5;
double_constants.minus_one_half = -0.5; double_constants.minus_one_half = -0.5;
double_constants.canonical_non_hole_nan = base::OS::nan_value(); double_constants.canonical_non_hole_nan = base::OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64); double_constants.the_hole_nan = bit_cast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY; double_constants.negative_infinity = -V8_INFINITY;
double_constants.uint32_bias = double_constants.uint32_bias =
static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1; static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
@ -923,9 +924,9 @@ void ExternalReference::InitializeMathExpData() {
math_exp_log_table_array = new double[kTableSize]; math_exp_log_table_array = new double[kTableSize];
for (int i = 0; i < kTableSize; i++) { for (int i = 0; i < kTableSize; i++) {
double value = std::pow(2, i / kTableSizeDouble); double value = std::pow(2, i / kTableSizeDouble);
uint64_t bits = BitCast<uint64_t, double>(value); uint64_t bits = bit_cast<uint64_t, double>(value);
bits &= (static_cast<uint64_t>(1) << 52) - 1; bits &= (static_cast<uint64_t>(1) << 52) - 1;
double mantissa = BitCast<double, uint64_t>(bits); double mantissa = bit_cast<double, uint64_t>(bits);
math_exp_log_table_array[i] = mantissa; math_exp_log_table_array[i] = mantissa;
} }
@ -936,8 +937,11 @@ void ExternalReference::InitializeMathExpData() {
void ExternalReference::TearDownMathExpData() { void ExternalReference::TearDownMathExpData() {
delete[] math_exp_constants_array; delete[] math_exp_constants_array;
math_exp_constants_array = NULL;
delete[] math_exp_log_table_array; delete[] math_exp_log_table_array;
math_exp_log_table_array = NULL;
delete math_exp_data_mutex; delete math_exp_data_mutex;
math_exp_data_mutex = NULL;
} }
@ -1567,38 +1571,4 @@ bool PositionsRecorder::WriteRecordedPositions() {
return written; return written;
} }
MultiplierAndShift::MultiplierAndShift(int32_t d) {
DCHECK(d <= -2 || 2 <= d);
const uint32_t two31 = 0x80000000;
uint32_t ad = Abs(d);
uint32_t t = two31 + (uint32_t(d) >> 31);
uint32_t anc = t - 1 - t % ad; // Absolute value of nc.
int32_t p = 31; // Init. p.
uint32_t q1 = two31 / anc; // Init. q1 = 2**p/|nc|.
uint32_t r1 = two31 - q1 * anc; // Init. r1 = rem(2**p, |nc|).
uint32_t q2 = two31 / ad; // Init. q2 = 2**p/|d|.
uint32_t r2 = two31 - q2 * ad; // Init. r2 = rem(2**p, |d|).
uint32_t delta;
do {
p++;
q1 *= 2; // Update q1 = 2**p/|nc|.
r1 *= 2; // Update r1 = rem(2**p, |nc|).
if (r1 >= anc) { // Must be an unsigned comparison here.
q1++;
r1 = r1 - anc;
}
q2 *= 2; // Update q2 = 2**p/|d|.
r2 *= 2; // Update r2 = rem(2**p, |d|).
if (r2 >= ad) { // Must be an unsigned comparison here.
q2++;
r2 = r2 - ad;
}
delta = ad - r2;
} while (q1 < delta || (q1 == delta && r1 == 0));
int32_t mul = static_cast<int32_t>(q2 + 1);
multiplier_ = (d < 0) ? -mul : mul;
shift_ = p - 32;
}
} } // namespace v8::internal } } // namespace v8::internal

View File

@ -41,7 +41,7 @@
#include "src/builtins.h" #include "src/builtins.h"
#include "src/gdb-jit.h" #include "src/gdb-jit.h"
#include "src/isolate.h" #include "src/isolate.h"
#include "src/runtime.h" #include "src/runtime/runtime.h"
#include "src/token.h" #include "src/token.h"
namespace v8 { namespace v8 {
@ -459,9 +459,7 @@ class RelocInfo {
Mode rmode() const { return rmode_; } Mode rmode() const { return rmode_; }
intptr_t data() const { return data_; } intptr_t data() const { return data_; }
double data64() const { return data64_; } double data64() const { return data64_; }
uint64_t raw_data64() { uint64_t raw_data64() { return bit_cast<uint64_t>(data64_); }
return BitCast<uint64_t>(data64_);
}
Code* host() const { return host_; } Code* host() const { return host_; }
void set_host(Code* host) { host_ = host; } void set_host(Code* host) { host_ = host; }
@ -774,12 +772,12 @@ class ExternalReference BASE_EMBEDDED {
PROFILING_API_CALL, PROFILING_API_CALL,
// Direct call to accessor getter callback. // Direct call to accessor getter callback.
// void f(Local<String> property, PropertyCallbackInfo& info) // void f(Local<Name> property, PropertyCallbackInfo& info)
DIRECT_GETTER_CALL, DIRECT_GETTER_CALL,
// Call to accessor getter callback via InvokeAccessorGetterCallback. // Call to accessor getter callback via InvokeAccessorGetterCallback.
// void f(Local<String> property, PropertyCallbackInfo& info, // void f(Local<Name> property, PropertyCallbackInfo& info,
// AccessorGetterCallback callback) // AccessorNameGetterCallback callback)
PROFILING_GETTER_CALL PROFILING_GETTER_CALL
}; };
@ -1110,20 +1108,6 @@ class NullCallWrapper : public CallWrapper {
}; };
// The multiplier and shift for signed division via multiplication, see Warren's
// "Hacker's Delight", chapter 10.
class MultiplierAndShift {
public:
explicit MultiplierAndShift(int32_t d);
int32_t multiplier() const { return multiplier_; }
int32_t shift() const { return shift_; }
private:
int32_t multiplier_;
int32_t shift_;
};
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_ASSEMBLER_H_ #endif // V8_ASSEMBLER_H_

View File

@ -2,20 +2,154 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#include "src/assert-scope.h" #include "src/assert-scope.h"
#include "src/v8.h"
#include "src/base/lazy-instance.h"
#include "src/base/platform/platform.h"
#include "src/isolate-inl.h"
#include "src/utils.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
uint32_t PerIsolateAssertBase::GetData(Isolate* isolate) { namespace {
return isolate->per_isolate_assert_data();
struct PerThreadAssertKeyConstructTrait FINAL {
static void Construct(base::Thread::LocalStorageKey* key) {
*key = base::Thread::CreateThreadLocalKey();
}
};
typedef base::LazyStaticInstance<base::Thread::LocalStorageKey,
PerThreadAssertKeyConstructTrait>::type
PerThreadAssertKey;
PerThreadAssertKey kPerThreadAssertKey;
} // namespace
class PerThreadAssertData FINAL {
public:
PerThreadAssertData() : nesting_level_(0) {
for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
assert_states_[i] = true;
}
}
~PerThreadAssertData() {
for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; ++i) {
DCHECK(assert_states_[i]);
}
}
bool Get(PerThreadAssertType type) const { return assert_states_[type]; }
void Set(PerThreadAssertType type, bool x) { assert_states_[type] = x; }
void IncrementLevel() { ++nesting_level_; }
bool DecrementLevel() { return --nesting_level_ == 0; }
static PerThreadAssertData* GetCurrent() {
return reinterpret_cast<PerThreadAssertData*>(
base::Thread::GetThreadLocal(kPerThreadAssertKey.Get()));
}
static void SetCurrent(PerThreadAssertData* data) {
base::Thread::SetThreadLocal(kPerThreadAssertKey.Get(), data);
}
private:
bool assert_states_[LAST_PER_THREAD_ASSERT_TYPE];
int nesting_level_;
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
};
template <PerThreadAssertType kType, bool kAllow>
PerThreadAssertScope<kType, kAllow>::PerThreadAssertScope()
: data_(PerThreadAssertData::GetCurrent()) {
if (data_ == NULL) {
data_ = new PerThreadAssertData();
PerThreadAssertData::SetCurrent(data_);
}
data_->IncrementLevel();
old_state_ = data_->Get(kType);
data_->Set(kType, kAllow);
} }
void PerIsolateAssertBase::SetData(Isolate* isolate, uint32_t data) { template <PerThreadAssertType kType, bool kAllow>
isolate->set_per_isolate_assert_data(data); PerThreadAssertScope<kType, kAllow>::~PerThreadAssertScope() {
DCHECK_NOT_NULL(data_);
data_->Set(kType, old_state_);
if (data_->DecrementLevel()) {
PerThreadAssertData::SetCurrent(NULL);
delete data_;
}
} }
} } // namespace v8::internal
// static
template <PerThreadAssertType kType, bool kAllow>
bool PerThreadAssertScope<kType, kAllow>::IsAllowed() {
PerThreadAssertData* data = PerThreadAssertData::GetCurrent();
return data == NULL || data->Get(kType);
}
template <PerIsolateAssertType kType, bool kAllow>
class PerIsolateAssertScope<kType, kAllow>::DataBit
: public BitField<bool, kType, 1> {};
template <PerIsolateAssertType kType, bool kAllow>
PerIsolateAssertScope<kType, kAllow>::PerIsolateAssertScope(Isolate* isolate)
: isolate_(isolate), old_data_(isolate->per_isolate_assert_data()) {
DCHECK_NOT_NULL(isolate);
STATIC_ASSERT(kType < 32);
isolate_->set_per_isolate_assert_data(DataBit::update(old_data_, kAllow));
}
template <PerIsolateAssertType kType, bool kAllow>
PerIsolateAssertScope<kType, kAllow>::~PerIsolateAssertScope() {
isolate_->set_per_isolate_assert_data(old_data_);
}
// static
template <PerIsolateAssertType kType, bool kAllow>
bool PerIsolateAssertScope<kType, kAllow>::IsAllowed(Isolate* isolate) {
return DataBit::decode(isolate->per_isolate_assert_data());
}
// -----------------------------------------------------------------------------
// Instantiations.
template class PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, false>;
template class PerThreadAssertScope<HEAP_ALLOCATION_ASSERT, true>;
template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, false>;
template class PerThreadAssertScope<HANDLE_ALLOCATION_ASSERT, true>;
template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, false>;
template class PerThreadAssertScope<HANDLE_DEREFERENCE_ASSERT, true>;
template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, false>;
template class PerThreadAssertScope<DEFERRED_HANDLE_DEREFERENCE_ASSERT, true>;
template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, false>;
template class PerThreadAssertScope<CODE_DEPENDENCY_CHANGE_ASSERT, true>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, false>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_ASSERT, true>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, false>;
template class PerIsolateAssertScope<JAVASCRIPT_EXECUTION_THROWS, true>;
template class PerIsolateAssertScope<ALLOCATION_FAILURE_ASSERT, false>;
template class PerIsolateAssertScope<ALLOCATION_FAILURE_ASSERT, true>;
template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, false>;
template class PerIsolateAssertScope<DEOPTIMIZATION_ASSERT, true>;
template class PerIsolateAssertScope<COMPILATION_ASSERT, false>;
template class PerIsolateAssertScope<COMPILATION_ASSERT, true>;
} // namespace internal
} // namespace v8

View File

@ -5,14 +5,16 @@
#ifndef V8_ASSERT_SCOPE_H_ #ifndef V8_ASSERT_SCOPE_H_
#define V8_ASSERT_SCOPE_H_ #define V8_ASSERT_SCOPE_H_
#include "src/allocation.h" #include "include/v8stdint.h"
#include "src/base/platform/platform.h" #include "src/base/macros.h"
#include "src/utils.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
// Forward declarations.
class Isolate; class Isolate;
class PerThreadAssertData;
enum PerThreadAssertType { enum PerThreadAssertType {
HEAP_ALLOCATION_ASSERT, HEAP_ALLOCATION_ASSERT,
@ -33,120 +35,35 @@ enum PerIsolateAssertType {
}; };
class PerThreadAssertData { template <PerThreadAssertType kType, bool kAllow>
class PerThreadAssertScope {
public: public:
PerThreadAssertData() : nesting_level_(0) { PerThreadAssertScope();
for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) { ~PerThreadAssertScope();
assert_states_[i] = true;
}
}
void set(PerThreadAssertType type, bool allow) { static bool IsAllowed();
assert_states_[type] = allow;
}
bool get(PerThreadAssertType type) const {
return assert_states_[type];
}
void increment_level() { ++nesting_level_; }
bool decrement_level() { return --nesting_level_ == 0; }
private: private:
bool assert_states_[LAST_PER_THREAD_ASSERT_TYPE];
int nesting_level_;
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertData);
};
class PerThreadAssertScopeBase {
protected:
PerThreadAssertScopeBase() {
data_ = GetAssertData();
if (data_ == NULL) {
data_ = new PerThreadAssertData();
SetThreadLocalData(data_);
}
data_->increment_level();
}
~PerThreadAssertScopeBase() {
if (!data_->decrement_level()) return;
for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
DCHECK(data_->get(static_cast<PerThreadAssertType>(i)));
}
delete data_;
SetThreadLocalData(NULL);
}
static PerThreadAssertData* GetAssertData() {
return reinterpret_cast<PerThreadAssertData*>(
base::Thread::GetThreadLocal(thread_local_key));
}
static base::Thread::LocalStorageKey thread_local_key;
PerThreadAssertData* data_; PerThreadAssertData* data_;
friend class Isolate;
private:
static void SetThreadLocalData(PerThreadAssertData* data) {
base::Thread::SetThreadLocal(thread_local_key, data);
}
};
template <PerThreadAssertType type, bool allow>
class PerThreadAssertScope : public PerThreadAssertScopeBase {
public:
PerThreadAssertScope() {
old_state_ = data_->get(type);
data_->set(type, allow);
}
~PerThreadAssertScope() { data_->set(type, old_state_); }
static bool IsAllowed() {
PerThreadAssertData* data = GetAssertData();
return data == NULL || data->get(type);
}
private:
bool old_state_; bool old_state_;
DISALLOW_COPY_AND_ASSIGN(PerThreadAssertScope); DISALLOW_COPY_AND_ASSIGN(PerThreadAssertScope);
}; };
class PerIsolateAssertBase {
protected:
static uint32_t GetData(Isolate* isolate);
static void SetData(Isolate* isolate, uint32_t data);
};
template <PerIsolateAssertType type, bool allow> template <PerIsolateAssertType type, bool allow>
class PerIsolateAssertScope : public PerIsolateAssertBase { class PerIsolateAssertScope {
public: public:
explicit PerIsolateAssertScope(Isolate* isolate) : isolate_(isolate) { explicit PerIsolateAssertScope(Isolate* isolate);
STATIC_ASSERT(type < 32); ~PerIsolateAssertScope();
old_data_ = GetData(isolate_);
SetData(isolate_, DataBit::update(old_data_, allow));
}
~PerIsolateAssertScope() { static bool IsAllowed(Isolate* isolate);
SetData(isolate_, old_data_);
}
static bool IsAllowed(Isolate* isolate) {
return DataBit::decode(GetData(isolate));
}
private: private:
typedef BitField<bool, type, 1> DataBit; class DataBit;
uint32_t old_data_;
Isolate* isolate_; Isolate* isolate_;
uint32_t old_data_;
DISALLOW_COPY_AND_ASSIGN(PerIsolateAssertScope); DISALLOW_COPY_AND_ASSIGN(PerIsolateAssertScope);
}; };

View File

@ -56,22 +56,22 @@ class AstRawStringInternalizationKey : public HashTableKey {
explicit AstRawStringInternalizationKey(const AstRawString* string) explicit AstRawStringInternalizationKey(const AstRawString* string)
: string_(string) {} : string_(string) {}
virtual bool IsMatch(Object* other) V8_OVERRIDE { virtual bool IsMatch(Object* other) OVERRIDE {
if (string_->is_one_byte_) if (string_->is_one_byte_)
return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_); return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_);
return String::cast(other)->IsTwoByteEqualTo( return String::cast(other)->IsTwoByteEqualTo(
Vector<const uint16_t>::cast(string_->literal_bytes_)); Vector<const uint16_t>::cast(string_->literal_bytes_));
} }
virtual uint32_t Hash() V8_OVERRIDE { virtual uint32_t Hash() OVERRIDE {
return string_->hash() >> Name::kHashShift; return string_->hash() >> Name::kHashShift;
} }
virtual uint32_t HashForObject(Object* key) V8_OVERRIDE { virtual uint32_t HashForObject(Object* key) OVERRIDE {
return String::cast(key)->Hash(); return String::cast(key)->Hash();
} }
virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
if (string_->is_one_byte_) if (string_->is_one_byte_)
return isolate->factory()->NewOneByteInternalizedString( return isolate->factory()->NewOneByteInternalizedString(
string_->literal_bytes_, string_->hash()); string_->literal_bytes_, string_->hash());
@ -249,7 +249,7 @@ const AstRawString* AstValueFactory::GetTwoByteString(
const AstRawString* AstValueFactory::GetString(Handle<String> literal) { const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
DisallowHeapAllocation no_gc; DisallowHeapAllocation no_gc;
String::FlatContent content = literal->GetFlatContent(); String::FlatContent content = literal->GetFlatContent();
if (content.IsAscii()) { if (content.IsOneByte()) {
return GetOneByteString(content.ToOneByteVector()); return GetOneByteString(content.ToOneByteVector());
} }
DCHECK(content.IsTwoByte()); DCHECK(content.IsTwoByte());

View File

@ -64,13 +64,13 @@ class AstString : public ZoneObject {
class AstRawString : public AstString { class AstRawString : public AstString {
public: public:
virtual int length() const V8_OVERRIDE { virtual int length() const OVERRIDE {
if (is_one_byte_) if (is_one_byte_)
return literal_bytes_.length(); return literal_bytes_.length();
return literal_bytes_.length() / 2; return literal_bytes_.length() / 2;
} }
virtual void Internalize(Isolate* isolate) V8_OVERRIDE; virtual void Internalize(Isolate* isolate) OVERRIDE;
bool AsArrayIndex(uint32_t* index) const; bool AsArrayIndex(uint32_t* index) const;
@ -120,11 +120,11 @@ class AstConsString : public AstString {
: left_(left), : left_(left),
right_(right) {} right_(right) {}
virtual int length() const V8_OVERRIDE { virtual int length() const OVERRIDE {
return left_->length() + right_->length(); return left_->length() + right_->length();
} }
virtual void Internalize(Isolate* isolate) V8_OVERRIDE; virtual void Internalize(Isolate* isolate) OVERRIDE;
private: private:
friend class AstValueFactory; friend class AstValueFactory;
@ -238,6 +238,7 @@ class AstValue : public ZoneObject {
#define STRING_CONSTANTS(F) \ #define STRING_CONSTANTS(F) \
F(anonymous_function, "(anonymous function)") \ F(anonymous_function, "(anonymous function)") \
F(arguments, "arguments") \ F(arguments, "arguments") \
F(constructor, "constructor") \
F(done, "done") \ F(done, "done") \
F(dot, ".") \ F(dot, ".") \
F(dot_for, ".for") \ F(dot_for, ".for") \

95
deps/v8/src/ast.cc vendored
View File

@ -59,8 +59,9 @@ bool Expression::IsUndefinedLiteral(Isolate* isolate) const {
} }
VariableProxy::VariableProxy(Zone* zone, Variable* var, int position) VariableProxy::VariableProxy(Zone* zone, Variable* var, int position,
: Expression(zone, position), IdGen* id_gen)
: Expression(zone, position, id_gen),
name_(var->raw_name()), name_(var->raw_name()),
var_(NULL), // Will be set by the call to BindTo. var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()), is_this_(var->is_this()),
@ -71,19 +72,15 @@ VariableProxy::VariableProxy(Zone* zone, Variable* var, int position)
} }
VariableProxy::VariableProxy(Zone* zone, VariableProxy::VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
const AstRawString* name, Interface* interface, int position, IdGen* id_gen)
bool is_this, : Expression(zone, position, id_gen),
Interface* interface,
int position)
: Expression(zone, position),
name_(name), name_(name),
var_(NULL), var_(NULL),
is_this_(is_this), is_this_(is_this),
is_assigned_(false), is_assigned_(false),
interface_(interface), interface_(interface),
variable_feedback_slot_(kInvalidFeedbackSlot) { variable_feedback_slot_(kInvalidFeedbackSlot) {}
}
void VariableProxy::BindTo(Variable* var) { void VariableProxy::BindTo(Variable* var) {
@ -101,17 +98,14 @@ void VariableProxy::BindTo(Variable* var) {
} }
Assignment::Assignment(Zone* zone, Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
Token::Value op, Expression* value, int pos, IdGen* id_gen)
Expression* target, : Expression(zone, pos, id_gen),
Expression* value,
int pos)
: Expression(zone, pos),
op_(op), op_(op),
target_(target), target_(target),
value_(value), value_(value),
binary_operation_(NULL), binary_operation_(NULL),
assignment_id_(GetNextId(zone)), assignment_id_(id_gen->GetNextId()),
is_uninitialized_(false), is_uninitialized_(false),
store_mode_(STANDARD_STORE) {} store_mode_(STANDARD_STORE) {}
@ -179,10 +173,12 @@ void FunctionLiteral::InitializeSharedInfo(
ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone, ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone,
AstValueFactory* ast_value_factory, AstValueFactory* ast_value_factory,
Literal* key, Expression* value) { Literal* key, Expression* value,
bool is_static) {
emit_store_ = true; emit_store_ = true;
key_ = key; key_ = key;
value_ = value; value_ = value;
is_static_ = is_static;
if (key->raw_value()->EqualsString(ast_value_factory->proto_string())) { if (key->raw_value()->EqualsString(ast_value_factory->proto_string())) {
kind_ = PROTOTYPE; kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) { } else if (value_->AsMaterializedLiteral() != NULL) {
@ -195,11 +191,13 @@ ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone,
} }
ObjectLiteralProperty::ObjectLiteralProperty( ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone, bool is_getter,
Zone* zone, bool is_getter, FunctionLiteral* value) { FunctionLiteral* value,
bool is_static) {
emit_store_ = true; emit_store_ = true;
value_ = value; value_ = value;
kind_ = is_getter ? GETTER : SETTER; kind_ = is_getter ? GETTER : SETTER;
is_static_ = is_static;
} }
@ -590,18 +588,16 @@ Call::CallType Call::GetCallType(Isolate* isolate) const {
bool Call::ComputeGlobalTarget(Handle<GlobalObject> global, bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
LookupResult* lookup) { LookupIterator* it) {
target_ = Handle<JSFunction>::null(); target_ = Handle<JSFunction>::null();
cell_ = Handle<Cell>::null(); cell_ = Handle<Cell>::null();
DCHECK(lookup->IsFound() && DCHECK(it->IsFound() && it->GetHolder<JSObject>().is_identical_to(global));
lookup->type() == NORMAL && cell_ = it->GetPropertyCell();
lookup->holder() == *global);
cell_ = Handle<Cell>(global->GetPropertyCell(lookup));
if (cell_->value()->IsJSFunction()) { if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value())); Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to // If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code. // change and thus prefer the general IC code.
if (!lookup->isolate()->heap()->InNewSpace(*candidate)) { if (!it->isolate()->heap()->InNewSpace(*candidate)) {
target_ = candidate; target_ = candidate;
return true; return true;
} }
@ -619,9 +615,6 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot()); is_monomorphic_ = oracle->CallNewIsMonomorphic(CallNewFeedbackSlot());
if (is_monomorphic_) { if (is_monomorphic_) {
target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot()); target_ = oracle->GetCallNewTarget(CallNewFeedbackSlot());
if (!allocation_site_.is_null()) {
elements_kind_ = allocation_site_->GetElementsKind();
}
} }
} }
@ -799,12 +792,12 @@ bool RegExpCapture::IsAnchoredAtEnd() {
// in as many cases as possible, to make it more difficult for incorrect // in as many cases as possible, to make it more difficult for incorrect
// parses to look as correct ones which is likely if the input and // parses to look as correct ones which is likely if the input and
// output formats are alike. // output formats are alike.
class RegExpUnparser V8_FINAL : public RegExpVisitor { class RegExpUnparser FINAL : public RegExpVisitor {
public: public:
RegExpUnparser(OStream& os, Zone* zone) : os_(os), zone_(zone) {} RegExpUnparser(OStream& os, Zone* zone) : os_(os), zone_(zone) {}
void VisitCharacterRange(CharacterRange that); void VisitCharacterRange(CharacterRange that);
#define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \ #define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*, \
void* data) V8_OVERRIDE; void* data) OVERRIDE;
FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE) FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
#undef MAKE_CASE #undef MAKE_CASE
private: private:
@ -995,17 +988,14 @@ RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
} }
CaseClause::CaseClause(Zone* zone, CaseClause::CaseClause(Zone* zone, Expression* label,
Expression* label, ZoneList<Statement*>* statements, int pos, IdGen* id_gen)
ZoneList<Statement*>* statements, : Expression(zone, pos, id_gen),
int pos)
: Expression(zone, pos),
label_(label), label_(label),
statements_(statements), statements_(statements),
compare_type_(Type::None(zone)), compare_type_(Type::None(zone)),
compare_id_(AstNode::GetNextId(zone)), compare_id_(id_gen->GetNextId()),
entry_id_(AstNode::GetNextId(zone)) { entry_id_(id_gen->GetNextId()) {}
}
#define REGULAR_NODE(NodeType) \ #define REGULAR_NODE(NodeType) \
@ -1020,14 +1010,21 @@ CaseClause::CaseClause(Zone* zone,
#define DONT_OPTIMIZE_NODE(NodeType) \ #define DONT_OPTIMIZE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \ increase_node_count(); \
set_dont_optimize_reason(k##NodeType); \ set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \ add_flag(kDontSelfOptimize); \
} }
#define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \ #define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \ increase_node_count(); \
add_slot_node(node); \ add_slot_node(node); \
set_dont_optimize_reason(k##NodeType); \ set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \
}
#define DONT_TURBOFAN_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \
set_dont_crankshaft_reason(k##NodeType); \
set_dont_turbofan_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \ add_flag(kDontSelfOptimize); \
} }
#define DONT_SELFOPTIMIZE_NODE(NodeType) \ #define DONT_SELFOPTIMIZE_NODE(NodeType) \
@ -1044,7 +1041,7 @@ CaseClause::CaseClause(Zone* zone,
#define DONT_CACHE_NODE(NodeType) \ #define DONT_CACHE_NODE(NodeType) \
void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
increase_node_count(); \ increase_node_count(); \
set_dont_optimize_reason(k##NodeType); \ set_dont_crankshaft_reason(k##NodeType); \
add_flag(kDontSelfOptimize); \ add_flag(kDontSelfOptimize); \
add_flag(kDontCache); \ add_flag(kDontCache); \
} }
@ -1093,17 +1090,21 @@ DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl) DONT_OPTIMIZE_NODE(ModuleUrl)
DONT_OPTIMIZE_NODE(ModuleStatement) DONT_OPTIMIZE_NODE(ModuleStatement)
DONT_OPTIMIZE_NODE(WithStatement) DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
DONT_OPTIMIZE_NODE(DebuggerStatement) DONT_OPTIMIZE_NODE(DebuggerStatement)
DONT_OPTIMIZE_NODE(ClassLiteral)
DONT_OPTIMIZE_NODE(NativeFunctionLiteral) DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
DONT_OPTIMIZE_NODE(SuperReference)
DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(Yield) DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(Yield)
// TODO(turbofan): Remove the dont_turbofan_reason once this list is empty.
DONT_TURBOFAN_NODE(ForOfStatement)
DONT_TURBOFAN_NODE(TryCatchStatement)
DONT_TURBOFAN_NODE(TryFinallyStatement)
DONT_SELFOPTIMIZE_NODE(DoWhileStatement) DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
DONT_SELFOPTIMIZE_NODE(WhileStatement) DONT_SELFOPTIMIZE_NODE(WhileStatement)
DONT_SELFOPTIMIZE_NODE(ForStatement) DONT_SELFOPTIMIZE_NODE(ForStatement)
DONT_SELFOPTIMIZE_NODE(ForOfStatement)
DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement) DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
@ -1115,7 +1116,7 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
add_slot_node(node); add_slot_node(node);
if (node->is_jsruntime()) { if (node->is_jsruntime()) {
// Don't try to optimize JS runtime calls because we bailout on them. // Don't try to optimize JS runtime calls because we bailout on them.
set_dont_optimize_reason(kCallToAJavaScriptRuntimeFunction); set_dont_crankshaft_reason(kCallToAJavaScriptRuntimeFunction);
} }
} }
@ -1129,7 +1130,7 @@ Handle<String> Literal::ToString() {
if (value_->IsString()) return value_->AsString()->string(); if (value_->IsString()) return value_->AsString()->string();
DCHECK(value_->IsNumber()); DCHECK(value_->IsNumber());
char arr[100]; char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr)); Vector<char> buffer(arr, arraysize(arr));
const char* str; const char* str;
if (value()->IsSmi()) { if (value()->IsSmi()) {
// Optimization only, the heap number case would subsume this. // Optimization only, the heap number case would subsume this.

1002
deps/v8/src/ast.h vendored

File diff suppressed because it is too large Load Diff

62
deps/v8/src/background-parsing-task.cc vendored Normal file
View File

@ -0,0 +1,62 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/background-parsing-task.h"
namespace v8 {
namespace internal {
BackgroundParsingTask::BackgroundParsingTask(
StreamedSource* source, ScriptCompiler::CompileOptions options,
int stack_size, Isolate* isolate)
: source_(source), options_(options), stack_size_(stack_size) {
// Prepare the data for the internalization phase and compilation phase, which
// will happen in the main thread after parsing.
source->info.Reset(new i::CompilationInfoWithZone(source->source_stream.get(),
source->encoding, isolate));
source->info->MarkAsGlobal();
// We don't set the context to the CompilationInfo yet, because the background
// thread cannot do anything with it anyway. We set it just before compilation
// on the foreground thread.
DCHECK(options == ScriptCompiler::kProduceParserCache ||
options == ScriptCompiler::kProduceCodeCache ||
options == ScriptCompiler::kNoCompileOptions);
source->allow_lazy =
!i::Compiler::DebuggerWantsEagerCompilation(source->info.get());
source->hash_seed = isolate->heap()->HashSeed();
}
void BackgroundParsingTask::Run() {
DisallowHeapAllocation no_allocation;
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
ScriptData* script_data = NULL;
if (options_ == ScriptCompiler::kProduceParserCache ||
options_ == ScriptCompiler::kProduceCodeCache) {
source_->info->SetCachedData(&script_data, options_);
}
uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - stack_size_ * KB;
Parser::ParseInfo parse_info = {limit, source_->hash_seed,
&source_->unicode_cache};
// Parser needs to stay alive for finalizing the parsing on the main
// thread. Passing &parse_info is OK because Parser doesn't store it.
source_->parser.Reset(new Parser(source_->info.get(), &parse_info));
source_->parser->set_allow_lazy(source_->allow_lazy);
source_->parser->ParseOnBackground();
if (script_data != NULL) {
source_->cached_data.Reset(new ScriptCompiler::CachedData(
script_data->data(), script_data->length(),
ScriptCompiler::CachedData::BufferOwned));
script_data->ReleaseDataOwnership();
delete script_data;
}
}
}
} // namespace v8::internal

67
deps/v8/src/background-parsing-task.h vendored Normal file
View File

@ -0,0 +1,67 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BACKGROUND_PARSING_TASK_H_
#define V8_BACKGROUND_PARSING_TASK_H_
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
#include "src/compiler.h"
#include "src/parser.h"
#include "src/smart-pointers.h"
namespace v8 {
namespace internal {
class Parser;
// Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
// data which needs to be transmitted between threads for background parsing,
// finalizing it on the main thread, and compiling on the main thread.
struct StreamedSource {
StreamedSource(ScriptCompiler::ExternalSourceStream* source_stream,
ScriptCompiler::StreamedSource::Encoding encoding)
: source_stream(source_stream),
encoding(encoding),
hash_seed(0),
allow_lazy(false) {}
// Internal implementation of v8::ScriptCompiler::StreamedSource.
SmartPointer<ScriptCompiler::ExternalSourceStream> source_stream;
ScriptCompiler::StreamedSource::Encoding encoding;
SmartPointer<ScriptCompiler::CachedData> cached_data;
// Data needed for parsing, and data needed to to be passed between thread
// between parsing and compilation. These need to be initialized before the
// compilation starts.
UnicodeCache unicode_cache;
SmartPointer<CompilationInfo> info;
uint32_t hash_seed;
bool allow_lazy;
SmartPointer<Parser> parser;
private:
// Prevent copying. Not implemented.
StreamedSource(const StreamedSource&);
StreamedSource& operator=(const StreamedSource&);
};
class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
public:
BackgroundParsingTask(StreamedSource* source,
ScriptCompiler::CompileOptions options, int stack_size,
Isolate* isolate);
virtual void Run();
private:
StreamedSource* source_; // Not owned.
ScriptCompiler::CompileOptions options_;
int stack_size_;
};
}
} // namespace v8::internal
#endif // V8_BACKGROUND_PARSING_TASK_H_

20
deps/v8/src/bailout-reason.cc vendored Normal file
View File

@ -0,0 +1,20 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/bailout-reason.h"
#include "src/base/logging.h"
namespace v8 {
namespace internal {
const char* GetBailoutReason(BailoutReason reason) {
DCHECK(reason < kLastErrorMessage);
#define ERROR_MESSAGES_TEXTS(C, T) T,
static const char* error_messages_[] = {
ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
#undef ERROR_MESSAGES_TEXTS
return error_messages_[reason];
}
}
} // namespace v8::internal

339
deps/v8/src/bailout-reason.h vendored Normal file
View File

@ -0,0 +1,339 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BAILOUT_REASON_H_
#define V8_BAILOUT_REASON_H_
namespace v8 {
namespace internal {
#define ERROR_MESSAGES_LIST(V) \
V(kNoReason, "no reason") \
\
V(k32BitValueInRegisterIsNotZeroExtended, \
"32 bit value in register is not zero-extended") \
V(kAlignmentMarkerExpected, "Alignment marker expected") \
V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned") \
V(kAPICallReturnedInvalidObject, "API call returned invalid object") \
V(kArgumentsObjectValueInATestContext, \
"Arguments object value in a test context") \
V(kArrayBoilerplateCreationFailed, "Array boilerplate creation failed") \
V(kArrayIndexConstantValueTooBig, "Array index constant value too big") \
V(kAssignmentToArguments, "Assignment to arguments") \
V(kAssignmentToLetVariableBeforeInitialization, \
"Assignment to let variable before initialization") \
V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable") \
V(kAssignmentToParameterFunctionUsesArgumentsObject, \
"Assignment to parameter, function uses arguments object") \
V(kAssignmentToParameterInArgumentsObject, \
"Assignment to parameter in arguments object") \
V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache") \
V(kBadValueContextForArgumentsObjectValue, \
"Bad value context for arguments object value") \
V(kBadValueContextForArgumentsValue, \
"Bad value context for arguments value") \
V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change") \
V(kBailoutWasNotPrepared, "Bailout was not prepared") \
V(kBinaryStubGenerateFloatingPointCode, \
"BinaryStub_GenerateFloatingPointCode") \
V(kBothRegistersWereSmisInSelectNonSmi, \
"Both registers were smis in SelectNonSmi") \
V(kCallToAJavaScriptRuntimeFunction, \
"Call to a JavaScript runtime function") \
V(kCannotTranslatePositionInChangedArea, \
"Cannot translate position in changed area") \
V(kClassLiteral, "Class literal") \
V(kCodeGenerationFailed, "Code generation failed") \
V(kCodeObjectNotProperlyPatched, "Code object not properly patched") \
V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot") \
V(kContextAllocatedArguments, "Context-allocated arguments") \
V(kCopyBuffersOverlap, "Copy buffers overlap") \
V(kCouldNotGenerateZero, "Could not generate +0.0") \
V(kCouldNotGenerateNegativeZero, "Could not generate -0.0") \
V(kDebuggerHasBreakPoints, "Debugger has break points") \
V(kDebuggerStatement, "DebuggerStatement") \
V(kDeclarationInCatchContext, "Declaration in catch context") \
V(kDeclarationInWithContext, "Declaration in with context") \
V(kDefaultNaNModeNotSet, "Default NaN mode not set") \
V(kDeleteWithGlobalVariable, "Delete with global variable") \
V(kDeleteWithNonGlobalVariable, "Delete with non-global variable") \
V(kDestinationOfCopyNotAligned, "Destination of copy not aligned") \
V(kDontDeleteCellsCannotContainTheHole, \
"DontDelete cells can't contain the hole") \
V(kDoPushArgumentNotImplementedForDoubleType, \
"DoPushArgument not implemented for double type") \
V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed") \
V(kEmitLoadRegisterUnsupportedDoubleImmediate, \
"EmitLoadRegister: Unsupported double immediate") \
V(kEval, "eval") \
V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel") \
V(kExpectedAlignmentMarker, "Expected alignment marker") \
V(kExpectedAllocationSite, "Expected allocation site") \
V(kExpectedFunctionObject, "Expected function object in register") \
V(kExpectedHeapNumber, "Expected HeapNumber") \
V(kExpectedNativeContext, "Expected native context") \
V(kExpectedNonIdenticalObjects, "Expected non-identical objects") \
V(kExpectedNonNullContext, "Expected non-null context") \
V(kExpectedPositiveZero, "Expected +0.0") \
V(kExpectedAllocationSiteInCell, "Expected AllocationSite in property cell") \
V(kExpectedFixedArrayInFeedbackVector, \
"Expected fixed array in feedback vector") \
V(kExpectedFixedArrayInRegisterA2, "Expected fixed array in register a2") \
V(kExpectedFixedArrayInRegisterEbx, "Expected fixed array in register ebx") \
V(kExpectedFixedArrayInRegisterR2, "Expected fixed array in register r2") \
V(kExpectedFixedArrayInRegisterRbx, "Expected fixed array in register rbx") \
V(kExpectedNewSpaceObject, "Expected new space object") \
V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber") \
V(kExpectedUndefinedOrCell, "Expected undefined or cell in register") \
V(kExpectingAlignmentForCopyBytes, "Expecting alignment for CopyBytes") \
V(kExportDeclaration, "Export declaration") \
V(kExternalStringExpectedButNotFound, \
"External string expected, but not found") \
V(kFailedBailedOutLastTime, "Failed/bailed out last time") \
V(kForInStatementIsNotFastCase, "ForInStatement is not fast case") \
V(kForInStatementOptimizationIsDisabled, \
"ForInStatement optimization is disabled") \
V(kForInStatementWithNonLocalEachVariable, \
"ForInStatement with non-local each variable") \
V(kForOfStatement, "ForOfStatement") \
V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned") \
V(kFunctionCallsEval, "Function calls eval") \
V(kFunctionIsAGenerator, "Function is a generator") \
V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
V(kGeneratedCodeIsTooLarge, "Generated code is too large") \
V(kGeneratorFailedToResume, "Generator failed to resume") \
V(kGenerator, "Generator") \
V(kGlobalFunctionsMustHaveInitialMap, \
"Global functions must have initial map") \
V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered") \
V(kHydrogenFilter, "Optimization disabled by filter") \
V(kImportDeclaration, "Import declaration") \
V(kImproperObjectOnPrototypeChainForStore, \
"Improper object on prototype chain for store") \
V(kIndexIsNegative, "Index is negative") \
V(kIndexIsTooLarge, "Index is too large") \
V(kInlinedRuntimeFunctionClassOf, "Inlined runtime function: ClassOf") \
V(kInlinedRuntimeFunctionFastOneByteArrayJoin, \
"Inlined runtime function: FastOneByteArrayJoin") \
V(kInlinedRuntimeFunctionGeneratorNext, \
"Inlined runtime function: GeneratorNext") \
V(kInlinedRuntimeFunctionGeneratorThrow, \
"Inlined runtime function: GeneratorThrow") \
V(kInlinedRuntimeFunctionGetFromCache, \
"Inlined runtime function: GetFromCache") \
V(kInlinedRuntimeFunctionIsNonNegativeSmi, \
"Inlined runtime function: IsNonNegativeSmi") \
V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf, \
"Inlined runtime function: IsStringWrapperSafeForDefaultValueOf") \
V(kInliningBailedOut, "Inlining bailed out") \
V(kInputGPRIsExpectedToHaveUpper32Cleared, \
"Input GPR is expected to have upper32 cleared") \
V(kInputStringTooLong, "Input string too long") \
V(kInstanceofStubUnexpectedCallSiteCacheCheck, \
"InstanceofStub unexpected call site cache (check)") \
V(kInstanceofStubUnexpectedCallSiteCacheCmp1, \
"InstanceofStub unexpected call site cache (cmp 1)") \
V(kInstanceofStubUnexpectedCallSiteCacheCmp2, \
"InstanceofStub unexpected call site cache (cmp 2)") \
V(kInstanceofStubUnexpectedCallSiteCacheMov, \
"InstanceofStub unexpected call site cache (mov)") \
V(kInteger32ToSmiFieldWritingToNonSmiLocation, \
"Integer32ToSmiField writing to non-smi location") \
V(kInvalidCaptureReferenced, "Invalid capture referenced") \
V(kInvalidElementsKindForInternalArrayOrInternalPackedArray, \
"Invalid ElementsKind for InternalArray or InternalPackedArray") \
V(kInvalidFullCodegenState, "invalid full-codegen state") \
V(kInvalidHandleScopeLevel, "Invalid HandleScope level") \
V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment") \
V(kInvalidLhsInCountOperation, "Invalid lhs in count operation") \
V(kInvalidMinLength, "Invalid min_length") \
V(kJSGlobalObjectNativeContextShouldBeANativeContext, \
"JSGlobalObject::native_context should be a native context") \
V(kJSGlobalProxyContextShouldNotBeNull, \
"JSGlobalProxy::context() should not be null") \
V(kJSObjectWithFastElementsMapHasSlowElements, \
"JSObject with fast elements map has slow elements") \
V(kLetBindingReInitialization, "Let binding re-initialization") \
V(kLhsHasBeenClobbered, "lhs has been clobbered") \
V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
V(kLiveEdit, "LiveEdit") \
V(kLookupVariableInCountOperation, "Lookup variable in count operation") \
V(kMapBecameDeprecated, "Map became deprecated") \
V(kMapBecameUnstable, "Map became unstable") \
V(kMapIsNoLongerInEax, "Map is no longer in eax") \
V(kModuleDeclaration, "Module declaration") \
V(kModuleLiteral, "Module literal") \
V(kModulePath, "Module path") \
V(kModuleStatement, "Module statement") \
V(kModuleVariable, "Module variable") \
V(kModuleUrl, "Module url") \
V(kNativeFunctionLiteral, "Native function literal") \
V(kSuperReference, "Super reference") \
V(kNeedSmiLiteral, "Need a Smi literal here") \
V(kNoCasesLeft, "No cases left") \
V(kNoEmptyArraysHereInEmitFastOneByteArrayJoin, \
"No empty arrays here in EmitFastOneByteArrayJoin") \
V(kNonInitializerAssignmentToConst, "Non-initializer assignment to const") \
V(kNonSmiIndex, "Non-smi index") \
V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal") \
V(kNonSmiValue, "Non-smi value") \
V(kNonObject, "Non-object value") \
V(kNotEnoughVirtualRegistersForValues, \
"Not enough virtual registers for values") \
V(kNotEnoughSpillSlotsForOsr, "Not enough spill slots for OSR") \
V(kNotEnoughVirtualRegistersRegalloc, \
"Not enough virtual registers (regalloc)") \
V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array") \
V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
V(kOddballInStringTableIsNotUndefinedOrTheHole, \
"Oddball in string table is not undefined or the hole") \
V(kOffsetOutOfRange, "Offset out of range") \
V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name") \
V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string") \
V(kOperandIsASmi, "Operand is a smi") \
V(kOperandIsNotAName, "Operand is not a name") \
V(kOperandIsNotANumber, "Operand is not a number") \
V(kOperandIsNotASmi, "Operand is not a smi") \
V(kOperandIsNotAString, "Operand is not a string") \
V(kOperandIsNotSmi, "Operand is not smi") \
V(kOperandNotANumber, "Operand not a number") \
V(kObjectTagged, "The object is tagged") \
V(kObjectNotTagged, "The object is not tagged") \
V(kOptimizationDisabled, "Optimization is disabled") \
V(kOptimizedTooManyTimes, "Optimized too many times") \
V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister, \
"Out of virtual registers while trying to allocate temp register") \
V(kParseScopeError, "Parse/scope error") \
V(kPossibleDirectCallToEval, "Possible direct call to eval") \
V(kPreconditionsWereNotMet, "Preconditions were not met") \
V(kPropertyAllocationCountFailed, "Property allocation count failed") \
V(kReceivedInvalidReturnAddress, "Received invalid return address") \
V(kReferenceToAVariableWhichRequiresDynamicLookup, \
"Reference to a variable which requires dynamic lookup") \
V(kReferenceToGlobalLexicalVariable, "Reference to global lexical variable") \
V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
V(kRegisterWasClobbered, "Register was clobbered") \
V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
V(kReturnAddressNotFoundInFrame, "Return address not found in frame") \
V(kRhsHasBeenClobbered, "Rhs has been clobbered") \
V(kScopedBlock, "ScopedBlock") \
V(kSmiAdditionOverflow, "Smi addition overflow") \
V(kSmiSubtractionOverflow, "Smi subtraction overflow") \
V(kStackAccessBelowStackPointer, "Stack access below stack pointer") \
V(kStackFrameTypesMustMatch, "Stack frame types must match") \
V(kTheCurrentStackPointerIsBelowCsp, \
"The current stack pointer is below csp") \
V(kTheInstructionShouldBeALui, "The instruction should be a lui") \
V(kTheInstructionShouldBeAnOri, "The instruction should be an ori") \
V(kTheInstructionToPatchShouldBeALoadFromConstantPool, \
"The instruction to patch should be a load from the constant pool") \
V(kTheInstructionToPatchShouldBeAnLdrLiteral, \
"The instruction to patch should be a ldr literal") \
V(kTheInstructionToPatchShouldBeALui, \
"The instruction to patch should be a lui") \
V(kTheInstructionToPatchShouldBeAnOri, \
"The instruction to patch should be an ori") \
V(kTheSourceAndDestinationAreTheSame, \
"The source and destination are the same") \
V(kTheStackPointerIsNotAligned, "The stack pointer is not aligned.") \
V(kTheStackWasCorruptedByMacroAssemblerCall, \
"The stack was corrupted by MacroAssembler::Call()") \
V(kTooManyParametersLocals, "Too many parameters/locals") \
V(kTooManyParameters, "Too many parameters") \
V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR") \
V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.") \
V(kToOperandIsDoubleRegisterUnimplemented, \
"ToOperand IsDoubleRegister unimplemented") \
V(kToOperandUnsupportedDoubleImmediate, \
"ToOperand Unsupported double immediate") \
V(kTryCatchStatement, "TryCatchStatement") \
V(kTryFinallyStatement, "TryFinallyStatement") \
V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi") \
V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space") \
V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier") \
V(kUndefinedValueNotLoaded, "Undefined value not loaded") \
V(kUndoAllocationOfNonAllocatedMemory, \
"Undo allocation of non allocated memory") \
V(kUnexpectedAllocationTop, "Unexpected allocation top") \
V(kUnexpectedColorFound, "Unexpected color bit pattern found") \
V(kUnexpectedElementsKindInArrayConstructor, \
"Unexpected ElementsKind in array constructor") \
V(kUnexpectedFallthroughFromCharCodeAtSlowCase, \
"Unexpected fallthrough from CharCodeAt slow case") \
V(kUnexpectedFallthroughFromCharFromCodeSlowCase, \
"Unexpected fallthrough from CharFromCode slow case") \
V(kUnexpectedFallThroughFromStringComparison, \
"Unexpected fall-through from string comparison") \
V(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode, \
"Unexpected fall-through in BinaryStub_GenerateFloatingPointCode") \
V(kUnexpectedFallthroughToCharCodeAtSlowCase, \
"Unexpected fallthrough to CharCodeAt slow case") \
V(kUnexpectedFallthroughToCharFromCodeSlowCase, \
"Unexpected fallthrough to CharFromCode slow case") \
V(kUnexpectedFPUStackDepthAfterInstruction, \
"Unexpected FPU stack depth after instruction") \
V(kUnexpectedInitialMapForArrayFunction1, \
"Unexpected initial map for Array function (1)") \
V(kUnexpectedInitialMapForArrayFunction2, \
"Unexpected initial map for Array function (2)") \
V(kUnexpectedInitialMapForArrayFunction, \
"Unexpected initial map for Array function") \
V(kUnexpectedInitialMapForInternalArrayFunction, \
"Unexpected initial map for InternalArray function") \
V(kUnexpectedLevelAfterReturnFromApiCall, \
"Unexpected level after return from api call") \
V(kUnexpectedNegativeValue, "Unexpected negative value") \
V(kUnexpectedNumberOfPreAllocatedPropertyFields, \
"Unexpected number of pre-allocated property fields") \
V(kUnexpectedFPCRMode, "Unexpected FPCR mode.") \
V(kUnexpectedSmi, "Unexpected smi value") \
V(kUnexpectedStringFunction, "Unexpected String function") \
V(kUnexpectedStringType, "Unexpected string type") \
V(kUnexpectedStringWrapperInstanceSize, \
"Unexpected string wrapper instance size") \
V(kUnexpectedTypeForRegExpDataFixedArrayExpected, \
"Unexpected type for RegExp data, FixedArray expected") \
V(kUnexpectedValue, "Unexpected value") \
V(kUnexpectedUnusedPropertiesOfStringWrapper, \
"Unexpected unused properties of string wrapper") \
V(kUnimplemented, "unimplemented") \
V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
V(kUnknown, "Unknown") \
V(kUnsupportedConstCompoundAssignment, \
"Unsupported const compound assignment") \
V(kUnsupportedCountOperationWithConst, \
"Unsupported count operation with const") \
V(kUnsupportedDoubleImmediate, "Unsupported double immediate") \
V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
V(kUnsupportedLookupSlotInDeclaration, \
"Unsupported lookup slot in declaration") \
V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare") \
V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments") \
V(kUnsupportedPhiUseOfConstVariable, \
"Unsupported phi use of const variable") \
V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate") \
V(kVariableResolvedToWithContext, "Variable resolved to with context") \
V(kWeShouldNotHaveAnEmptyLexicalContext, \
"We should not have an empty lexical context") \
V(kWithStatement, "WithStatement") \
V(kWrongFunctionContext, "Wrong context passed to function") \
V(kWrongAddressOrValuePassedToRecordWrite, \
"Wrong address or value passed to RecordWrite") \
V(kYield, "Yield")
// Expand each (constant, message) pair from ERROR_MESSAGES_LIST to just the
// enumerator name, followed by a comma.
#define ERROR_MESSAGES_CONSTANTS(C, T) C,
// One enumerator per bailout reason, plus a trailing sentinel that marks the
// number of reasons (useful for bounds checks over message tables).
enum BailoutReason {
  ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
};
#undef ERROR_MESSAGES_CONSTANTS
// Returns the human-readable message string associated with |reason|.
const char* GetBailoutReason(BailoutReason reason);
}  // namespace internal
} // namespace v8::internal
#endif  // V8_BAILOUT_REASON_H_

View File

@ -27,16 +27,16 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
__asm__ __volatile__(".set push\n" __asm__ __volatile__(".set push\n"
".set noreorder\n" ".set noreorder\n"
"1:\n" "1:\n"
"ll %0, %5\n" // prev = *ptr "ll %0, 0(%4)\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2 "bne %0, %2, 2f\n" // if (prev != old_value) goto 2
"move %2, %4\n" // tmp = new_value "move %1, %3\n" // tmp = new_value
"sc %2, %1\n" // *ptr = tmp (with atomic check) "sc %1, 0(%4)\n" // *ptr = tmp (with atomic check)
"beqz %2, 1b\n" // start again on atomic error "beqz %1, 1b\n" // start again on atomic error
"nop\n" // delay slot nop "nop\n" // delay slot nop
"2:\n" "2:\n"
".set pop\n" ".set pop\n"
: "=&r" (prev), "=m" (*ptr), "=&r" (tmp) : "=&r" (prev), "=&r" (tmp)
: "Ir" (old_value), "r" (new_value), "m" (*ptr) : "Ir" (old_value), "r" (new_value), "r" (ptr)
: "memory"); : "memory");
return prev; return prev;
} }
@ -48,15 +48,16 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 temp, old; Atomic32 temp, old;
__asm__ __volatile__(".set push\n" __asm__ __volatile__(".set push\n"
".set noreorder\n" ".set noreorder\n"
".set at\n"
"1:\n" "1:\n"
"ll %1, %2\n" // old = *ptr "ll %1, 0(%3)\n" // old = *ptr
"move %0, %3\n" // temp = new_value "move %0, %2\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check) "sc %0, 0(%3)\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error "beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop "nop\n" // delay slot nop
".set pop\n" ".set pop\n"
: "=&r" (temp), "=&r" (old), "=m" (*ptr) : "=&r" (temp), "=&r" (old)
: "r" (new_value), "m" (*ptr) : "r" (new_value), "r" (ptr)
: "memory"); : "memory");
return old; return old;
@ -71,14 +72,14 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
__asm__ __volatile__(".set push\n" __asm__ __volatile__(".set push\n"
".set noreorder\n" ".set noreorder\n"
"1:\n" "1:\n"
"ll %0, %2\n" // temp = *ptr "ll %0, 0(%3)\n" // temp = *ptr
"addu %1, %0, %3\n" // temp2 = temp + increment "addu %1, %0, %2\n" // temp2 = temp + increment
"sc %1, %2\n" // *ptr = temp2 (with atomic check) "sc %1, 0(%3)\n" // *ptr = temp2 (with atomic check)
"beqz %1, 1b\n" // start again on atomic error "beqz %1, 1b\n" // start again on atomic error
"addu %1, %0, %3\n" // temp2 = temp + increment "addu %1, %0, %2\n" // temp2 = temp + increment
".set pop\n" ".set pop\n"
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr) : "=&r" (temp), "=&r" (temp2)
: "Ir" (increment), "m" (*ptr) : "Ir" (increment), "r" (ptr)
: "memory"); : "memory");
// temp2 now holds the final value. // temp2 now holds the final value.
return temp2; return temp2;

View File

@ -20,11 +20,16 @@
'../..', '../..',
], ],
'sources': [ ### gcmole(all) ### 'sources': [ ### gcmole(all) ###
'bits-unittest.cc',
'cpu-unittest.cc', 'cpu-unittest.cc',
'division-by-constant-unittest.cc',
'flags-unittest.cc',
'platform/condition-variable-unittest.cc', 'platform/condition-variable-unittest.cc',
'platform/mutex-unittest.cc', 'platform/mutex-unittest.cc',
'platform/platform-unittest.cc', 'platform/platform-unittest.cc',
'platform/semaphore-unittest.cc',
'platform/time-unittest.cc', 'platform/time-unittest.cc',
'sys-info-unittest.cc',
'utils/random-number-generator-unittest.cc', 'utils/random-number-generator-unittest.cc',
], ],
'conditions': [ 'conditions': [

167
deps/v8/src/base/bits-unittest.cc vendored Normal file
View File

@ -0,0 +1,167 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <limits>
#include "src/base/bits.h"
#include "src/base/macros.h"
#include "testing/gtest-support.h"
#ifdef DEBUG
// DCHECK-based death tests can only fire in debug builds; in release builds
// prefix the test name with DISABLED_ so GTest skips it.
#define DISABLE_IN_RELEASE(Name) Name
#else
#define DISABLE_IN_RELEASE(Name) DISABLED_##Name
#endif
namespace v8 {
namespace base {
namespace bits {
TEST(Bits, CountPopulation32) {
  // Walk from a fully-set word down to zero, covering dense, nibble-patterned
  // and sparse inputs along the way.
  EXPECT_EQ(32u, CountPopulation32(0xffffffff));
  EXPECT_EQ(24u, CountPopulation32(0xfff0f0ff));
  EXPECT_EQ(16u, CountPopulation32(0xf0f0f0f0));
  EXPECT_EQ(8u, CountPopulation32(0x11111111));
  EXPECT_EQ(1u, CountPopulation32(1));
  EXPECT_EQ(0u, CountPopulation32(0));
}
TEST(Bits, CountLeadingZeros32) {
  // A word whose only set bit is at position |pos| has 31 - pos leading zeros.
  TRACED_FORRANGE(uint32_t, pos, 0, 31) {
    EXPECT_EQ(31u - pos, CountLeadingZeros32(1u << pos));
  }
  // Mixed pattern, then the two boundary inputs.
  EXPECT_EQ(4u, CountLeadingZeros32(0x0f0f0f0f));
  EXPECT_EQ(31u, CountLeadingZeros32(1));
  EXPECT_EQ(32u, CountLeadingZeros32(0));
}
TEST(Bits, CountTrailingZeros32) {
  // A word whose only set bit is at position |pos| has exactly |pos|
  // trailing zeros.
  TRACED_FORRANGE(uint32_t, pos, 0, 31) {
    EXPECT_EQ(pos, CountTrailingZeros32(1u << pos));
  }
  // Mixed pattern, then the two boundary inputs.
  EXPECT_EQ(4u, CountTrailingZeros32(0xf0f0f0f0));
  EXPECT_EQ(31u, CountTrailingZeros32(0x80000000));
  EXPECT_EQ(32u, CountTrailingZeros32(0));
}
TEST(Bits, IsPowerOfTwo32) {
  // Neither zero nor all-ones is a power of two.
  EXPECT_FALSE(IsPowerOfTwo32(0U));
  EXPECT_FALSE(IsPowerOfTwo32(0xffffffff));
  TRACED_FORRANGE(uint32_t, bit, 0, 31) {
    // Exactly one bit set is a power of two; perturbed variants are not.
    EXPECT_TRUE(IsPowerOfTwo32(1U << bit));
    EXPECT_FALSE(IsPowerOfTwo32((1U << bit) + 5U));
    EXPECT_FALSE(IsPowerOfTwo32(~(1U << bit)));
  }
  TRACED_FORRANGE(uint32_t, bit, 2, 31) {
    // 2^bit - 1 is a run of ones (not a power of two once bit >= 2).
    EXPECT_FALSE(IsPowerOfTwo32((1U << bit) - 1U));
  }
}
TEST(Bits, IsPowerOfTwo64) {
  // Neither zero nor all-ones is a power of two.
  EXPECT_FALSE(IsPowerOfTwo64(0U));
  EXPECT_FALSE(IsPowerOfTwo64(V8_UINT64_C(0xffffffffffffffff)));
  TRACED_FORRANGE(uint32_t, bit, 0, 63) {
    // Exactly one bit set is a power of two; perturbed variants are not.
    EXPECT_TRUE(IsPowerOfTwo64(V8_UINT64_C(1) << bit));
    EXPECT_FALSE(IsPowerOfTwo64((V8_UINT64_C(1) << bit) + 5U));
    EXPECT_FALSE(IsPowerOfTwo64(~(V8_UINT64_C(1) << bit)));
  }
  TRACED_FORRANGE(uint32_t, bit, 2, 63) {
    // 2^bit - 1 is a run of ones (not a power of two once bit >= 2).
    EXPECT_FALSE(IsPowerOfTwo64((V8_UINT64_C(1) << bit) - 1U));
  }
}
TEST(Bits, RoundUpToPowerOfTwo32) {
  // Exact powers of two are fixed points of the rounding.
  TRACED_FORRANGE(uint32_t, bit, 0, 31) {
    EXPECT_EQ(1u << bit, RoundUpToPowerOfTwo32(1u << bit));
  }
  // Non-powers round up to the next power; zero maps to zero by convention.
  EXPECT_EQ(0x80000000u, RoundUpToPowerOfTwo32(0x7fffffffu));
  EXPECT_EQ(4u, RoundUpToPowerOfTwo32(3));
  EXPECT_EQ(0u, RoundUpToPowerOfTwo32(0));
}
// Inputs above 2^31 cannot be rounded up within 32 bits: the death message is
// matched against "0x80000000", the limit named by the DCHECK in
// RoundUpToPowerOfTwo32 (only active in debug builds, hence DISABLE_IN_RELEASE).
TEST(BitsDeathTest, DISABLE_IN_RELEASE(RoundUpToPowerOfTwo32)) {
  ASSERT_DEATH_IF_SUPPORTED({ RoundUpToPowerOfTwo32(0x80000001u); },
                            "0x80000000");
}
TEST(Bits, RoundDownToPowerOfTwo32) {
  // Exact powers of two are fixed points of the rounding.
  TRACED_FORRANGE(uint32_t, bit, 0, 31) {
    EXPECT_EQ(1u << bit, RoundDownToPowerOfTwo32(1u << bit));
  }
  // Non-powers round down to the previous power; zero maps to zero.
  EXPECT_EQ(0x80000000u, RoundDownToPowerOfTwo32(0x80000001u));
  EXPECT_EQ(4u, RoundDownToPowerOfTwo32(5));
  EXPECT_EQ(0u, RoundDownToPowerOfTwo32(0));
}
TEST(Bits, RotateRight32) {
  // Rotating zero by any amount yields zero.
  TRACED_FORRANGE(uint32_t, amount, 0, 31) {
    EXPECT_EQ(0u, RotateRight32(0u, amount));
  }
  // Rotation by zero is the identity; the low bit wraps around to the top.
  EXPECT_EQ(1u, RotateRight32(1, 0));
  EXPECT_EQ(1u, RotateRight32(2, 1));
  EXPECT_EQ(0x80000000u, RotateRight32(1, 1));
}
TEST(Bits, RotateRight64) {
  // Rotating zero by any amount yields zero.
  TRACED_FORRANGE(uint64_t, amount, 0, 63) {
    EXPECT_EQ(0u, RotateRight64(0u, amount));
  }
  // Rotation by zero is the identity; the low bit wraps around to the top.
  EXPECT_EQ(1u, RotateRight64(1, 0));
  EXPECT_EQ(1u, RotateRight64(2, 1));
  EXPECT_EQ(V8_UINT64_C(0x8000000000000000), RotateRight64(1, 1));
}
TEST(Bits, SignedAddOverflow32) {
  int32_t actual = 0;
  // Adding zeros neither overflows nor perturbs the result slot.
  EXPECT_FALSE(SignedAddOverflow32(0, 0, &actual));
  EXPECT_EQ(0, actual);
  // INT32_MAX + 1 and INT32_MIN + (-1) wrap around and report overflow.
  EXPECT_TRUE(
      SignedAddOverflow32(std::numeric_limits<int32_t>::max(), 1, &actual));
  EXPECT_EQ(std::numeric_limits<int32_t>::min(), actual);
  EXPECT_TRUE(
      SignedAddOverflow32(std::numeric_limits<int32_t>::min(), -1, &actual));
  EXPECT_EQ(std::numeric_limits<int32_t>::max(), actual);
  // MAX + MAX wraps to -2 in two's complement.
  EXPECT_TRUE(SignedAddOverflow32(std::numeric_limits<int32_t>::max(),
                                  std::numeric_limits<int32_t>::max(),
                                  &actual));
  EXPECT_EQ(-2, actual);
  // Small positive sums never overflow and match ordinary addition.
  TRACED_FORRANGE(int32_t, lhs, 1, 50) {
    TRACED_FORRANGE(int32_t, rhs, 1, lhs) {
      EXPECT_FALSE(SignedAddOverflow32(lhs, rhs, &actual));
      EXPECT_EQ(lhs + rhs, actual);
    }
  }
}
TEST(Bits, SignedSubOverflow32) {
  int32_t actual = 0;
  // Subtracting zeros neither overflows nor perturbs the result slot.
  EXPECT_FALSE(SignedSubOverflow32(0, 0, &actual));
  EXPECT_EQ(0, actual);
  // INT32_MIN - 1 and INT32_MAX - (-1) wrap around and report overflow.
  EXPECT_TRUE(
      SignedSubOverflow32(std::numeric_limits<int32_t>::min(), 1, &actual));
  EXPECT_EQ(std::numeric_limits<int32_t>::max(), actual);
  EXPECT_TRUE(
      SignedSubOverflow32(std::numeric_limits<int32_t>::max(), -1, &actual));
  EXPECT_EQ(std::numeric_limits<int32_t>::min(), actual);
  // Small differences with lhs >= rhs never overflow and match ordinary
  // subtraction.
  TRACED_FORRANGE(int32_t, lhs, 1, 50) {
    TRACED_FORRANGE(int32_t, rhs, 1, lhs) {
      EXPECT_FALSE(SignedSubOverflow32(lhs, rhs, &actual));
      EXPECT_EQ(lhs - rhs, actual);
    }
  }
}
} // namespace bits
} // namespace base
} // namespace v8

25
deps/v8/src/base/bits.cc vendored Normal file
View File

@ -0,0 +1,25 @@
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/base/bits.h"
#include "src/base/logging.h"
namespace v8 {
namespace base {
namespace bits {
// Returns the smallest power of two that is >= |value|. |value| must not
// exceed 2^31 (checked in debug builds); an input of zero yields zero.
uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
  DCHECK_LE(value, 0x80000000u);
  // Classic bit-smearing: starting from value - 1, propagate the highest set
  // bit into every lower position, producing a run of ones; adding one then
  // lands on the next power of two.
  uint32_t result = value - 1;
  result |= result >> 1;
  result |= result >> 2;
  result |= result >> 4;
  result |= result >> 8;
  result |= result >> 16;
  return result + 1;
}
} // namespace bits
} // namespace base
} // namespace v8

Some files were not shown because too many files have changed in this diff Show More