Merge branch 'v0.4'
Conflicts: src/node_version.h
commit 247d880113

ChangeLog | 44
@@ -1,4 +1,46 @@
-2011.02.19, Version 0.4.1 (stable)
+2011.03.02, Version 0.4.2 (stable)

* Improve docs.

* Fix process.on edge case with signal event (Alexis Sellier)

* Pragma HTTP header comma separation

* In addition to 'aborted' emit 'close' from incoming requests
  (Felix Geisendörfer)

* Fix memleak in vm.runInNewContext

* Do not cache modules that throw exceptions (Felix Geisendörfer)

* Build system changes for libnode (Aria Stewart)

* Read up the prototype of the 'env' object. (Nathan Rajlich)

* Add 'close' and 'aborted' events to Agent responses

* http: fix missing 'drain' events (Russell Haering)

* Fix process.stdout.end() throws ENOTSOCK error. (Koichi Kobayashi)

* REPL bug fixes (isaacs)

* node_modules folders should be highest priority (isaacs)

* URL parse more safely (isaacs)

* Expose errno with a string for dns/cares (Felix Geisendörfer)

* Fix tty.setWindowSize

* spawn: setuid after chdir (isaacs)

* SIGUSR1 should break the VM without delay

* Upgrade V8 to 3.1.8.


2011.02.19, Version 0.4.1 (stable), e8aef84191bc2c1ba2bcaa54f30aabde7f03769b

* Fixed field merging with progressive fields on writeHead()
  (TJ Holowaychuk)
LICENSE | 93

@@ -1,49 +1,8 @@
This license applies to all parts of Node that are not externally
maintained libraries. The externally maintained libraries used by Node
are:

- v8, located under deps/v8, which is copyrighted by the Google, Inc.
  v8 has a BSD license.

- libev, located under deps/libev, and libeio, located at deps/libeio.
  This code is copyrighted by Marc Alexander Lehmann. Both are dually
  licensed under MIT and GPL2.

- WAF build system, located at tools/waf. Copyrighted Thomas Nagy.
  Released under an MIT license.

- The SCONS build system, located at tools/scons. Copyrighted by the SCONS
  Foundation. Released under an MIT license.

- C-Ares, an asynchronous DNS client, located at deps/c-ares. Copyright by
  the Massachusetts Institute of Technology; authored by Greg Hudson,
  Daniel Stenberg and others. Released under an MIT license.

- Node, optionally, dynmaically links to OpenSSL, cryptographic software
  written by Eric Young (eay@cryptsoft.com) to provide SSL/TLS encryption.
  OpenSSL is copyrighted by The OpenSSL Project. OpenSSL has a simple
  Apache-style license. OpenSSL is not included in the Node distribution.
  See http://openssl.org/ for more information.

- tools/doctool/markdown.js is Released under MIT license and
  Copyright 2009-2010 Dominic Baggott and Ash Berli

- HTTP Parser, located at deps/http_parser, is a small C library
  copyrighted by Ryan Lienhart Dahl and has a MIT license.

- src/platform_darwin_proctitle.cc, has code taken from the Chromium
  project copyright Google Inc. and released with the BSD license.

- tools/closure_linter is copyrighted by The Closure Linter Authors and
  Google Inc and is released under the Apache license.

- tools/cpplint.py is copyrighted by Google Inc and is released under the
  BSD license.


Node's license follows:

Copyright 2009, 2010 Ryan Lienhart Dahl. All rights reserved.

====

Copyright Joyent, Inc. and other Node contributors. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
@@ -61,3 +20,49 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

====

This license applies to all parts of Node that are not externally
maintained libraries.

The externally maintained libraries used by Node are:

- v8, located at deps/v8. v8 is copyright Google, Inc, and released
  under a BSD license.

- libev, located at deps/libev, and libeio, located at deps/libeio. libev
  and libeio are copyright Marc Alexander Lehmann, and dual-licensed
  under the MIT license and GPL2.

- WAF build system, located at tools/waf. WAF is copyright Thomas Nagy,
  and released under the MIT license.

- The SCONS build system, located at tools/scons. SCONS is copyright
  the SCONS Foundation and released under the MIT license.

- C-Ares, an asynchronous DNS client, located at deps/c-ares. C-Ares is
  copyright the Massachusetts Institute of Technology, authored by
  Greg Hudson, Daniel Stenberg and others, and released under the MIT
  license.

- Node, optionally, dynamically links to OpenSSL, cryptographic software
  written by Eric Young (eay@cryptsoft.com) to provide SSL/TLS encryption.
  OpenSSL is copyright The OpenSSL Project and released under the OpenSSL
  license. OpenSSL is not included in the Node distribution.
  See http://openssl.org/ for more information.

- tools/doctool/markdown.js is copyright 2009-2010 Dominic Baggott and Ash
  Berli and released under the MIT license.

- HTTP Parser, located at deps/http_parser, is a small C library
  copyright Ryan Lienhart Dahl and released under the MIT license.

- src/platform_darwin_proctitle.cc, has code taken from the Chromium
  project copyright Google Inc. and released under a BSD license.

- tools/closure_linter is copyright The Closure Linter Authors and
  Google Inc. and released under the Apache License, version 2.0.

- tools/cpplint.py is copyright Google Inc. and released under a
  BSD license.
@@ -62,6 +62,8 @@ endif()

if(${node_platform} MATCHES darwin)
  set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -framework Carbon")
  # explicitly set this so that we don't check again when building libeio
  set(HAVE_FDATASYNC 0)
else()
  # OSX fdatasync() check wrong: http://public.kitware.com/Bug/view.php?id=10044
  check_function_exists(fdatasync HAVE_FDATASYNC)

@@ -62,6 +62,7 @@ set(node_sources
  src/node_script.cc
  src/node_os.cc
  src/node_dtrace.cc
  src/node_string.cc
  src/node_natives.h
  ${node_extra_src})
deps/v8/ChangeLog | 27 (vendored)

@@ -1,3 +1,30 @@
2011-03-02: Version 3.1.8

        Fixed a number of crash bugs.

        Improved Crankshaft for x64 and ARM.

        Implemented more of EcmaScript 5 strict mode.

        Fixed issue with unaligned reads and writes on ARM.

        Improved heap profiler support.


2011-02-28: Version 3.1.7

        Fixed a number of crash bugs.

        Improved Crankshaft for x64 and ARM.

        Fixed implementation of indexOf/lastIndexOf for sparse
        arrays (http://crbug.com/73940).

        Fixed bug in map space compaction (http://crbug.com/59688).

        Added support for direct getter accessors calls on ARM.


2011-02-24: Version 3.1.6

        Fixed a number of crash bugs.
deps/v8/SConstruct | 1 (vendored)

@@ -306,7 +306,6 @@ V8_EXTRA_FLAGS = {
  'gcc': {
    'all': {
      'WARNINGFLAGS': ['-Wall',
                       '-Werror',
                       '-W',
                       '-Wno-unused-parameter',
                       '-Wnon-virtual-dtor']
deps/v8/src/api.cc | 6 (vendored)

@@ -2286,7 +2286,8 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
      self,
      key_obj,
      value_obj,
-     static_cast<PropertyAttributes>(attribs));
+     static_cast<PropertyAttributes>(attribs),
+     i::kNonStrictMode);
  has_pending_exception = obj.is_null();
  EXCEPTION_BAILOUT_CHECK(false);
  return true;

@@ -2711,7 +2712,8 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key,
      hidden_props,
      key_obj,
      value_obj,
-     static_cast<PropertyAttributes>(None));
+     static_cast<PropertyAttributes>(None),
+     i::kNonStrictMode);
  has_pending_exception = obj.is_null();
  EXCEPTION_BAILOUT_CHECK(false);
  return true;
deps/v8/src/arm/assembler-arm.h | 1 (vendored)

@@ -284,6 +284,7 @@ const SwVfpRegister s29 = { 29 };
const SwVfpRegister s30 = { 30 };
const SwVfpRegister s31 = { 31 };

+const DwVfpRegister no_dreg = { -1 };
const DwVfpRegister d0 = { 0 };
const DwVfpRegister d1 = { 1 };
const DwVfpRegister d2 = { 2 };
deps/v8/src/arm/code-stubs-arm.cc | 712 (vendored)

@@ -398,8 +398,11 @@ class FloatingPointHelper : public AllStatic {
                                 Label* not_number);

  // Loads the number from object into dst as a 32-bit integer if possible. If
- // the object is not a 32-bit integer control continues at the label
- // not_int32. If VFP is supported double_scratch is used but not scratch2.
+ // the object cannot be converted to a 32-bit integer control continues at
+ // the label not_int32. If VFP is supported double_scratch is used
+ // but not scratch2.
+ // Floating point values in the 32-bit integer range will be rounded
+ // to an integer.
  static void LoadNumberAsInteger(MacroAssembler* masm,
                                  Register object,
                                  Register dst,

@@ -409,6 +412,76 @@ class FloatingPointHelper : public AllStatic {
                                  DwVfpRegister double_scratch,
                                  Label* not_int32);

  // Load the number from object into double_dst in the double format.
  // Control will jump to not_int32 if the value cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be loaded.
  static void LoadNumberAsInt32Double(MacroAssembler* masm,
                                      Register object,
                                      Destination destination,
                                      DwVfpRegister double_dst,
                                      Register dst1,
                                      Register dst2,
                                      Register heap_number_map,
                                      Register scratch1,
                                      Register scratch2,
                                      SwVfpRegister single_scratch,
                                      Label* not_int32);

  // Loads the number from object into dst as a 32-bit integer.
  // Control will jump to not_int32 if the object cannot be exactly represented
  // by a 32-bit integer.
  // Floating point values in the 32-bit integer range that are not exact
  // integers won't be converted.
  // scratch3 is not used when VFP3 is supported.
  static void LoadNumberAsInt32(MacroAssembler* masm,
                                Register object,
                                Register dst,
                                Register heap_number_map,
                                Register scratch1,
                                Register scratch2,
                                Register scratch3,
                                DwVfpRegister double_scratch,
                                Label* not_int32);

  // Generate non VFP3 code to check if a double can be exactly represented by
  // a 32-bit integer. This does not check for 0 or -0, which need
  // to be checked for separately.
  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
  // through otherwise.
  // src1 and src2 will be clobbered.
  //
  // Expected input:
  // - src1: higher (exponent) part of the double value.
  // - src2: lower (mantissa) part of the double value.
  // Output status:
  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
  // - src2: contains 1.
  // - other registers are clobbered.
  static void DoubleIs32BitInteger(MacroAssembler* masm,
                                   Register src1,
                                   Register src2,
                                   Register dst,
                                   Register scratch,
                                   Label* not_int32);

  // Generates code to call a C function to do a double operation using core
  // registers. (Used when VFP3 is not supported.)
  // This code never falls through, but returns with a heap number containing
  // the result in r0.
  // Register heapnumber_result must be a heap number in which the
  // result of the operation will be stored.
  // Requires the following layout on entry:
  // r0: Left value (least significant part of mantissa).
  // r1: Left value (sign, exponent, top of mantissa).
  // r2: Right value (least significant part of mantissa).
  // r3: Right value (sign, exponent, top of mantissa).
  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
                                          Token::Value op,
                                          Register heap_number_result,
                                          Register scratch);

 private:
  static void LoadNumber(MacroAssembler* masm,
                         FloatingPointHelper::Destination destination,

@@ -560,6 +633,319 @@ void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm,
}


void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
                                                  Register object,
                                                  Destination destination,
                                                  DwVfpRegister double_dst,
                                                  Register dst1,
                                                  Register dst2,
                                                  Register heap_number_map,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  SwVfpRegister single_scratch,
                                                  Label* not_int32) {
  ASSERT(!scratch1.is(object) && !scratch2.is(object));
  ASSERT(!scratch1.is(scratch2));
  ASSERT(!heap_number_map.is(object) &&
         !heap_number_map.is(scratch1) &&
         !heap_number_map.is(scratch2));

  Label done, obj_is_not_smi;

  __ JumpIfNotSmi(object, &obj_is_not_smi);
  __ SmiUntag(scratch1, object);
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    __ vmov(single_scratch, scratch1);
    __ vcvt_f64_s32(double_dst, single_scratch);
    if (destination == kCoreRegisters) {
      __ vmov(dst1, dst2, double_dst);
    }
  } else {
    Label fewer_than_20_useful_bits;
    // Expected output:
    // |         dst1         |         dst2         |
    // | s |    exp    |          mantissa           |

    // Check for zero.
    __ cmp(scratch1, Operand(0));
    __ mov(dst1, scratch1);
    __ mov(dst2, scratch1);
    __ b(eq, &done);

    // Preload the sign of the value.
    __ and_(dst1, scratch1, Operand(HeapNumber::kSignMask), SetCC);
    // Get the absolute value of the object (as an unsigned integer).
    __ rsb(scratch1, scratch1, Operand(0), SetCC, mi);

    // Get mantissa[51:20].

    // Get the position of the first set bit.
    __ CountLeadingZeros(dst2, scratch1, scratch2);
    __ rsb(dst2, dst2, Operand(31));

    // Set the exponent.
    __ add(scratch2, dst2, Operand(HeapNumber::kExponentBias));
    __ Bfi(dst1, scratch2, scratch2,
           HeapNumber::kExponentShift, HeapNumber::kExponentBits);

    // Clear the first non null bit.
    __ mov(scratch2, Operand(1));
    __ bic(scratch1, scratch1, Operand(scratch2, LSL, dst2));

    __ cmp(dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
    // Get the number of bits to set in the lower part of the mantissa.
    __ sub(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
    __ b(mi, &fewer_than_20_useful_bits);
    // Set the higher 20 bits of the mantissa.
    __ orr(dst1, dst1, Operand(scratch1, LSR, scratch2));
    __ rsb(scratch2, scratch2, Operand(32));
    __ mov(dst2, Operand(scratch1, LSL, scratch2));
    __ b(&done);

    __ bind(&fewer_than_20_useful_bits);
    __ rsb(scratch2, dst2, Operand(HeapNumber::kMantissaBitsInTopWord));
    __ mov(scratch2, Operand(scratch1, LSL, scratch2));
    __ orr(dst1, dst1, scratch2);
    // Set dst2 to 0.
    __ mov(dst2, Operand(0));
  }

  __ b(&done);

  __ bind(&obj_is_not_smi);
  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Load the number.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    // Load the double value.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);

    __ EmitVFPTruncate(kRoundToZero,
                       single_scratch,
                       double_dst,
                       scratch1,
                       scratch2,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ b(ne, not_int32);

    if (destination == kCoreRegisters) {
      __ vmov(dst1, dst2, double_dst);
    }

  } else {
    ASSERT(!scratch1.is(object) && !scratch2.is(object));
    // Load the double value in the destination registers.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));

    // Check for 0 and -0.
    __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
    __ orr(scratch1, scratch1, Operand(dst2));
    __ cmp(scratch1, Operand(0));
    __ b(eq, &done);

    // Check that the value can be exactly represented by a 32-bit integer.
    // Jump to not_int32 if that's not the case.
    DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);

    // dst1 and dst2 were trashed. Reload the double value.
    __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
  }

  __ bind(&done);
}


void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
                                            Register object,
                                            Register dst,
                                            Register heap_number_map,
                                            Register scratch1,
                                            Register scratch2,
                                            Register scratch3,
                                            DwVfpRegister double_scratch,
                                            Label* not_int32) {
  ASSERT(!dst.is(object));
  ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
  ASSERT(!scratch1.is(scratch2) &&
         !scratch1.is(scratch3) &&
         !scratch2.is(scratch3));

  Label done;

  // Untag the object into the destination register.
  __ SmiUntag(dst, object);
  // Just return if the object is a smi.
  __ JumpIfSmi(object, &done);

  if (FLAG_debug_code) {
    __ AbortIfNotRootValue(heap_number_map,
                           Heap::kHeapNumberMapRootIndex,
                           "HeapNumberMap register clobbered.");
  }
  __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);

  // Object is a heap number.
  // Convert the floating point value to a 32-bit integer.
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);
    SwVfpRegister single_scratch = double_scratch.low();
    // Load the double value.
    __ sub(scratch1, object, Operand(kHeapObjectTag));
    __ vldr(double_scratch, scratch1, HeapNumber::kValueOffset);

    __ EmitVFPTruncate(kRoundToZero,
                       single_scratch,
                       double_scratch,
                       scratch1,
                       scratch2,
                       kCheckForInexactConversion);

    // Jump to not_int32 if the operation did not succeed.
    __ b(ne, not_int32);
    // Get the result in the destination register.
    __ vmov(dst, single_scratch);

  } else {
    // Load the double value in the destination registers.
    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));

    // Check for 0 and -0.
    __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
    __ orr(dst, scratch2, Operand(dst));
    __ cmp(dst, Operand(0));
    __ b(eq, &done);

    DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);

    // Registers state after DoubleIs32BitInteger.
    // dst: mantissa[51:20].
    // scratch2: 1

    // Shift back the higher bits of the mantissa.
    __ mov(dst, Operand(dst, LSR, scratch3));
    // Set the implicit first bit.
    __ rsb(scratch3, scratch3, Operand(32));
    __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
    // Set the sign.
    __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
    __ tst(scratch1, Operand(HeapNumber::kSignMask));
    __ rsb(dst, dst, Operand(0), LeaveCC, mi);
  }

  __ bind(&done);
}


void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
                                               Register src1,
                                               Register src2,
                                               Register dst,
                                               Register scratch,
                                               Label* not_int32) {
  // Get exponent alone in scratch.
  __ Ubfx(scratch,
          src1,
          HeapNumber::kExponentShift,
          HeapNumber::kExponentBits);

  // Subtract the bias from the exponent.
  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias), SetCC);

  // src1: higher (exponent) part of the double value.
  // src2: lower (mantissa) part of the double value.
  // scratch: unbiased exponent.

  // Fast cases. Check for obvious non 32-bit integer values.
  // Negative exponent cannot yield 32-bit integers.
  __ b(mi, not_int32);
  // Exponent greater than 31 cannot yield 32-bit integers.
  // Also, a positive value with an exponent equal to 31 is outside of the
  // signed 32-bit integer range.
  __ tst(src1, Operand(HeapNumber::kSignMask));
  __ cmp(scratch, Operand(30), eq);  // Executed for positive. If exponent is 30
                                     // the gt condition will be "correct" and
                                     // the next instruction will be skipped.
  __ cmp(scratch, Operand(31), ne);  // Executed for negative and positive where
                                     // exponent is not 30.
  __ b(gt, not_int32);
  // - Bits [21:0] in the mantissa are not null.
  __ tst(src2, Operand(0x3fffff));
  __ b(ne, not_int32);

  // Otherwise the exponent needs to be big enough to shift left all the
  // non zero bits left. So we need the (30 - exponent) last bits of the
  // 31 higher bits of the mantissa to be null.
  // Because bits [21:0] are null, we can check instead that the
  // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.

  // Get the 32 higher bits of the mantissa in dst.
  __ Ubfx(dst,
          src2,
          HeapNumber::kMantissaBitsInTopWord,
          32 - HeapNumber::kMantissaBitsInTopWord);
  __ orr(dst,
         dst,
         Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));

  // Create the mask and test the lower bits (of the higher bits).
  __ rsb(scratch, scratch, Operand(32));
  __ mov(src2, Operand(1));
  __ mov(src1, Operand(src2, LSL, scratch));
  __ sub(src1, src1, Operand(1));
  __ tst(dst, src1);
  __ b(ne, not_int32);
}
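
For intuition, the exponent/mantissa test that DoubleIs32BitInteger performs with ARM bit-field instructions can be written portably. The helper below is a hypothetical illustration, not part of the diff; it assumes IEEE-754 doubles and, like the generated code, leaves the 0/-0 distinction to a separate check:

#include <cstdint>
#include <cstring>

// True iff d is exactly representable as a signed 32-bit integer.
bool DoubleIsInt32(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // IEEE-754 bit pattern
  int exponent = static_cast<int>((bits >> 52) & 0x7ff) - 1023;
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  if (exponent < 0) return d == 0.0;  // |d| < 1: only +/-0 qualify
  if (exponent > 31) return false;    // magnitude too large for int32
  // Every fraction bit below the binary point must be zero, i.e. only the
  // top `exponent` mantissa bits (after the implicit leading 1) may be set.
  if (mantissa & ((uint64_t{1} << (52 - exponent)) - 1)) return false;
  // Exponent 31 is in range only for INT32_MIN itself.
  if (exponent == 31) return (bits >> 63) != 0 && mantissa == 0;
  return true;
}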


void FloatingPointHelper::CallCCodeForDoubleOperation(
    MacroAssembler* masm,
    Token::Value op,
    Register heap_number_result,
    Register scratch) {
  // Using core registers:
  // r0: Left value (least significant part of mantissa).
  // r1: Left value (sign, exponent, top of mantissa).
  // r2: Right value (least significant part of mantissa).
  // r3: Right value (sign, exponent, top of mantissa).

  // Assert that heap_number_result is callee-saved.
  // We currently always use r5 to pass it.
  ASSERT(heap_number_result.is(r5));

  // Push the current return address before the C call. Return will be
  // through pop(pc) below.
  __ push(lr);
  __ PrepareCallCFunction(4, scratch);  // Two doubles are 4 arguments.
  // Call C routine that may not cause GC or other trouble.
  __ CallCFunction(ExternalReference::double_fp_operation(op), 4);
  // Store answer in the overwritable heap number.
#if !defined(USE_ARM_EABI)
  // Double returned in fp coprocessor register 0 and 1, encoded as
  // register cr8. Offsets must be divisible by 4 for coprocessor so we
  // need to subtract the tag from heap_number_result.
  __ sub(scratch, heap_number_result, Operand(kHeapObjectTag));
  __ stc(p1, cr8, MemOperand(scratch, HeapNumber::kValueOffset));
#else
  // Double returned in registers 0 and 1.
  __ Strd(r0, r1, FieldMemOperand(heap_number_result,
                                  HeapNumber::kValueOffset));
#endif
  // Place heap_number_result in r0 and return to the pushed return address.
  __ mov(r0, Operand(heap_number_result));
  __ pop(pc);
}


// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {

@@ -1296,6 +1682,9 @@ void CompareStub::Generate(MacroAssembler* masm) {
// This stub does not handle the inlined cases (Smis, Booleans, undefined).
// The stub returns zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
+ // This stub uses VFP3 instructions.
+ ASSERT(CpuFeatures::IsEnabled(VFP3));
+
  Label false_result;
  Label not_heap_number;
  Register scratch = r9.is(tos_) ? r7 : r9;

@@ -2704,33 +3093,11 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
      __ add(r0, r0, Operand(kHeapObjectTag));
      __ Ret();
    } else {
-     // Using core registers:
-     // r0: Left value (least significant part of mantissa).
-     // r1: Left value (sign, exponent, top of mantissa).
-     // r2: Right value (least significant part of mantissa).
-     // r3: Right value (sign, exponent, top of mantissa).
-
-     // Push the current return address before the C call. Return will be
-     // through pop(pc) below.
-     __ push(lr);
-     __ PrepareCallCFunction(4, scratch1);  // Two doubles are 4 arguments.
-     // Call C routine that may not cause GC or other trouble. r5 is callee
-     // save.
-     __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
-     // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
-     // Double returned in fp coprocessor register 0 and 1, encoded as
-     // register cr8. Offsets must be divisible by 4 for coprocessor so we
-     // need to subtract the tag from r5.
-     __ sub(scratch1, result, Operand(kHeapObjectTag));
-     __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
-#else
-     // Double returned in registers 0 and 1.
-     __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset));
-#endif
-     // Place result in r0 and return to the pushed return address.
-     __ mov(r0, Operand(result));
-     __ pop(pc);
+     // Call the C function to handle the double operation.
+     FloatingPointHelper::CallCCodeForDoubleOperation(masm,
+                                                      op_,
+                                                      result,
+                                                      scratch1);
    }
    break;
  }

@@ -2776,7 +3143,6 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
      break;
    case Token::SAR:
-     // Use only the 5 least significant bits of the shift count.
-     __ and_(r2, r2, Operand(0x1f));
+     __ GetLeastBitsFromInt32(r2, r2, 5);
      __ mov(r2, Operand(r3, ASR, r2));
      break;

@@ -2921,9 +3287,290 @@ void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
  ASSERT(operands_type_ == TRBinaryOpIC::INT32);

  Register left = r1;
  Register right = r0;
  Register scratch1 = r7;
  Register scratch2 = r9;
  DwVfpRegister double_scratch = d0;
  SwVfpRegister single_scratch = s3;

  Register heap_number_result = no_reg;
  Register heap_number_map = r6;
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  Label call_runtime;
  // Labels for type transition, used for wrong input or output types.
  // Both labels are currently actually bound to the same position. We use
  // two different labels to differentiate the cause leading to type
  // transition.
  Label transition;

  // Smi-smi fast case.
  Label skip;
  __ orr(scratch1, left, right);
  __ JumpIfNotSmi(scratch1, &skip);
  GenerateSmiSmiOperation(masm);
  // Fall through if the result is not a smi.
  __ bind(&skip);

  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV:
    case Token::MOD: {
      // Load both operands and check that they are 32-bit integers.
      // Jump to type transition if they are not. The registers r0 and r1
      // (right and left) are preserved for the runtime call.
      FloatingPointHelper::Destination destination =
          CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ?
              FloatingPointHelper::kVFPRegisters :
              FloatingPointHelper::kCoreRegisters;

      FloatingPointHelper::LoadNumberAsInt32Double(masm,
                                                   right,
                                                   destination,
                                                   d7,
                                                   r2,
                                                   r3,
                                                   heap_number_map,
                                                   scratch1,
                                                   scratch2,
                                                   s0,
                                                   &transition);
      FloatingPointHelper::LoadNumberAsInt32Double(masm,
                                                   left,
                                                   destination,
                                                   d6,
                                                   r4,
                                                   r5,
                                                   heap_number_map,
                                                   scratch1,
                                                   scratch2,
                                                   s0,
                                                   &transition);

      if (destination == FloatingPointHelper::kVFPRegisters) {
        CpuFeatures::Scope scope(VFP3);
        Label return_heap_number;
        switch (op_) {
          case Token::ADD:
            __ vadd(d5, d6, d7);
            break;
          case Token::SUB:
            __ vsub(d5, d6, d7);
            break;
          case Token::MUL:
            __ vmul(d5, d6, d7);
            break;
          case Token::DIV:
            __ vdiv(d5, d6, d7);
            break;
          default:
            UNREACHABLE();
        }

        if (op_ != Token::DIV) {
          // These operations produce an integer result.
          // Try to return a smi if we can.
          // Otherwise return a heap number if allowed, or jump to type
          // transition.

          __ EmitVFPTruncate(kRoundToZero,
                             single_scratch,
                             d5,
                             scratch1,
                             scratch2);

          if (result_type_ <= TRBinaryOpIC::INT32) {
            // If the ne condition is set, result does
            // not fit in a 32-bit integer.
            __ b(ne, &transition);
          }

          // Check if the result fits in a smi.
          __ vmov(scratch1, single_scratch);
          __ add(scratch2, scratch1, Operand(0x40000000), SetCC);
          // If not try to return a heap number.
          __ b(mi, &return_heap_number);
          // Tag the result and return.
          __ SmiTag(r0, scratch1);
          __ Ret();
        }
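
The add(scratch2, scratch1, Operand(0x40000000), SetCC) / b(mi, ...) pair above is V8's standard smi-range test: on 32-bit targets a smi payload is a 31-bit signed integer, and adding 2^30 sets the sign bit exactly when the value falls outside [-2^30, 2^30). A tiny illustrative equivalent (hypothetical helper, not from the diff):

#include <cstdint>

// A 32-bit value fits in a 31-bit smi iff it lies in [-2^30, 2^30).
// Adding 2^30 maps that interval onto [0, 2^31), so the sign bit of
// the sum is clear exactly for values that can be smi-tagged.
bool FitsInSmi(int32_t value) {
  return ((static_cast<uint32_t>(value) + 0x40000000u) & 0x80000000u) == 0;
}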

        if (result_type_ >= ((op_ == Token::DIV) ? TRBinaryOpIC::HEAP_NUMBER
                                                 : TRBinaryOpIC::INT32)) {
          __ bind(&return_heap_number);
          // We are using vfp registers so r5 is available.
          heap_number_result = r5;
          GenerateHeapResultAllocation(masm,
                                       heap_number_result,
                                       heap_number_map,
                                       scratch1,
                                       scratch2,
                                       &call_runtime);
          __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
          __ vstr(d5, r0, HeapNumber::kValueOffset);
          __ mov(r0, heap_number_result);
          __ Ret();
        }

        // A DIV operation expecting an integer result falls through
        // to type transition.

      } else {
        // We preserved r0 and r1 to be able to call runtime.
        // Save the left value on the stack.
        __ Push(r5, r4);

        // Allocate a heap number to store the result.
        heap_number_result = r5;
        GenerateHeapResultAllocation(masm,
                                     heap_number_result,
                                     heap_number_map,
                                     scratch1,
                                     scratch2,
                                     &call_runtime);

        // Load the left value from the value saved on the stack.
        __ Pop(r1, r0);

        // Call the C function to handle the double operation.
        FloatingPointHelper::CallCCodeForDoubleOperation(
            masm, op_, heap_number_result, scratch1);
      }

      break;
    }

    case Token::BIT_OR:
    case Token::BIT_XOR:
    case Token::BIT_AND:
    case Token::SAR:
    case Token::SHR:
    case Token::SHL: {
      Label return_heap_number;
      Register scratch3 = r5;
      // Convert operands to 32-bit integers. Right in r2 and left in r3. The
      // registers r0 and r1 (right and left) are preserved for the runtime
      // call.
      FloatingPointHelper::LoadNumberAsInt32(masm,
                                             left,
                                             r3,
                                             heap_number_map,
                                             scratch1,
                                             scratch2,
                                             scratch3,
                                             d0,
                                             &transition);
      FloatingPointHelper::LoadNumberAsInt32(masm,
                                             right,
                                             r2,
                                             heap_number_map,
                                             scratch1,
                                             scratch2,
                                             scratch3,
                                             d0,
                                             &transition);

      // The ECMA-262 standard specifies that, for shift operations, only the
      // 5 least significant bits of the shift value should be used.
      switch (op_) {
        case Token::BIT_OR:
          __ orr(r2, r3, Operand(r2));
          break;
        case Token::BIT_XOR:
          __ eor(r2, r3, Operand(r2));
          break;
        case Token::BIT_AND:
          __ and_(r2, r3, Operand(r2));
          break;
        case Token::SAR:
          __ and_(r2, r2, Operand(0x1f));
          __ mov(r2, Operand(r3, ASR, r2));
          break;
        case Token::SHR:
          __ and_(r2, r2, Operand(0x1f));
          __ mov(r2, Operand(r3, LSR, r2), SetCC);
          // SHR is special because it is required to produce a positive
          // answer. We only get a negative result if the shift value (r2) is
          // 0. This result cannot be represented as a signed 32-bit integer,
          // so try to return a heap number if we can.
          // The non vfp3 code does not support this special case, so jump to
          // runtime if we don't support it.
          if (CpuFeatures::IsSupported(VFP3)) {
            __ b(mi,
                 (result_type_ <= TRBinaryOpIC::INT32) ? &transition
                                                       : &return_heap_number);
          } else {
            __ b(mi, (result_type_ <= TRBinaryOpIC::INT32) ? &transition
                                                           : &call_runtime);
          }
          break;
        case Token::SHL:
          __ and_(r2, r2, Operand(0x1f));
          __ mov(r2, Operand(r3, LSL, r2));
          break;
        default:
          UNREACHABLE();
      }
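
The 0x1f masking in the shift cases implements the ECMA-262 rule cited above: shift counts are taken modulo 32, so in JavaScript 1 << 33 evaluates to 2. A one-line illustration (hypothetical helper; the unsigned intermediate avoids C++ overflow pitfalls):

#include <cstdint>

// JavaScript's << uses only the 5 least significant bits of the count,
// which is exactly what the and_(r2, r2, Operand(0x1f)) above encodes.
int32_t JsShiftLeft(int32_t value, int32_t count) {
  return static_cast<int32_t>(static_cast<uint32_t>(value)
                              << (count & 0x1f));
}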

      // Check if the result fits in a smi.
      __ add(scratch1, r2, Operand(0x40000000), SetCC);
      // If not try to return a heap number. (We know the result is an int32.)
      __ b(mi, &return_heap_number);
      // Tag the result and return.
      __ SmiTag(r0, r2);
      __ Ret();

      __ bind(&return_heap_number);
      if (CpuFeatures::IsSupported(VFP3)) {
        CpuFeatures::Scope scope(VFP3);
        heap_number_result = r5;
        GenerateHeapResultAllocation(masm,
                                     heap_number_result,
                                     heap_number_map,
                                     scratch1,
                                     scratch2,
                                     &call_runtime);

        if (op_ != Token::SHR) {
          // Convert the result to a floating point value.
          __ vmov(double_scratch.low(), r2);
          __ vcvt_f64_s32(double_scratch, double_scratch.low());
        } else {
          // The result must be interpreted as an unsigned 32-bit integer.
          __ vmov(double_scratch.low(), r2);
          __ vcvt_f64_u32(double_scratch, double_scratch.low());
        }

        // Store the result.
        __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
        __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
        __ mov(r0, heap_number_result);
        __ Ret();
      } else {
        // Tail call that writes the int32 in r2 to the heap number in r0,
        // using r3 as scratch. r0 is preserved and returned.
        WriteInt32ToHeapNumberStub stub(r2, r0, r3);
        __ TailCallStub(&stub);
      }

      break;
    }

    default:
      UNREACHABLE();
  }

  if (transition.is_linked()) {
    __ bind(&transition);
    GenerateTypeTransition(masm);
  }

  __ bind(&call_runtime);
  GenerateCallRuntime(masm);
}


void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  Label not_numbers, call_runtime;

@@ -5957,11 +6604,10 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
-                                   ApiFunction *function) {
+                                   ExternalReference function) {
  __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()),
                     RelocInfo::CODE_TARGET));
- __ mov(r2,
-        Operand(ExternalReference(function, ExternalReference::DIRECT_CALL)));
+ __ mov(r2, Operand(function));
  // Push return address (accessible to GC through exit frame pc).
  __ str(pc, MemOperand(sp, 0));
  __ Jump(r2);  // Call the api function.

deps/v8/src/arm/code-stubs-arm.h | 2 (vendored)

@@ -592,7 +592,7 @@ class DirectCEntryStub: public CodeStub {
 public:
  DirectCEntryStub() {}
  void Generate(MacroAssembler* masm);
- void GenerateCall(MacroAssembler* masm, ApiFunction *function);
+ void GenerateCall(MacroAssembler* masm, ExternalReference function);
  void GenerateCall(MacroAssembler* masm, Register target);

 private:

deps/v8/src/arm/codegen-arm.cc | 47 (vendored)

@@ -1938,8 +1938,9 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
  frame_->EmitPush(cp);
  frame_->EmitPush(Operand(pairs));
  frame_->EmitPush(Operand(Smi::FromInt(is_eval() ? 1 : 0)));
+ frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));

- frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
+ frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
  // The result is discarded.
}

@@ -3287,7 +3288,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
    // context slot followed by initialization.
    frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
  } else {
-   frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+   frame_->EmitPush(Operand(Smi::FromInt(strict_mode_flag())));
+   frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
  }
  // Storing a variable must keep the (new) value on the expression
  // stack. This is necessary for compiling assignment expressions.

@@ -3637,7 +3639,8 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
      Load(key);
      Load(value);
      if (property->emit_store()) {
-       frame_->CallRuntime(Runtime::kSetProperty, 3);
+       frame_->EmitPush(Operand(Smi::FromInt(NONE)));  // PropertyAttributes
+       frame_->CallRuntime(Runtime::kSetProperty, 4);
      } else {
        frame_->Drop(3);
      }

@@ -5170,11 +5173,11 @@ class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode {

    // Set the bit in the map to indicate that it has been checked safe for
    // default valueOf and set true result.
-   __ ldr(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+   __ ldrb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
    __ orr(scratch1_,
           scratch1_,
           Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
-   __ str(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
+   __ strb(scratch1_, FieldMemOperand(map_result_, Map::kBitField2Offset));
    __ mov(map_result_, Operand(1));
    __ jmp(exit_label());
    __ bind(&false_result);

@@ -6674,8 +6677,12 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
 public:
  DeferredReferenceSetKeyedValue(Register value,
                                 Register key,
-                                Register receiver)
-     : value_(value), key_(key), receiver_(receiver) {
+                                Register receiver,
+                                StrictModeFlag strict_mode)
+     : value_(value),
+       key_(key),
+       receiver_(receiver),
+       strict_mode_(strict_mode) {
    set_comment("[ DeferredReferenceSetKeyedValue");
  }

@@ -6685,6 +6692,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
  Register value_;
  Register key_;
  Register receiver_;
+ StrictModeFlag strict_mode_;
};


@@ -6706,7 +6714,9 @@ void DeferredReferenceSetKeyedValue::Generate() {
  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    // Call keyed store IC. It has the arguments value, key and receiver in r0,
    // r1 and r2.
-   Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+   Handle<Code> ic(Builtins::builtin(
+       (strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
+                                     : Builtins::KeyedStoreIC_Initialize));
    __ Call(ic, RelocInfo::CODE_TARGET);
    // The call must be followed by a nop instruction to indicate that the
    // keyed store has been inlined.

@@ -6724,8 +6734,12 @@ class DeferredReferenceSetNamedValue: public DeferredCode {
 public:
  DeferredReferenceSetNamedValue(Register value,
                                 Register receiver,
-                                Handle<String> name)
-     : value_(value), receiver_(receiver), name_(name) {
+                                Handle<String> name,
+                                StrictModeFlag strict_mode)
+     : value_(value),
+       receiver_(receiver),
+       name_(name),
+       strict_mode_(strict_mode) {
    set_comment("[ DeferredReferenceSetNamedValue");
  }

@@ -6735,6 +6749,7 @@ class DeferredReferenceSetNamedValue: public DeferredCode {
  Register value_;
  Register receiver_;
  Handle<String> name_;
+ StrictModeFlag strict_mode_;
};


@@ -6754,7 +6769,9 @@ void DeferredReferenceSetNamedValue::Generate() {
  { Assembler::BlockConstPoolScope block_const_pool(masm_);
    // Call keyed store IC. It has the arguments value, key and receiver in r0,
    // r1 and r2.
-   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+   Handle<Code> ic(Builtins::builtin(
+       (strict_mode_ == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
+                                     : Builtins::StoreIC_Initialize));
    __ Call(ic, RelocInfo::CODE_TARGET);
    // The call must be followed by a nop instruction to indicate that the
    // named store has been inlined.

@@ -6943,7 +6960,8 @@ void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
  Register receiver = r1;

  DeferredReferenceSetNamedValue* deferred =
-     new DeferredReferenceSetNamedValue(value, receiver, name);
+     new DeferredReferenceSetNamedValue(
+         value, receiver, name, strict_mode_flag());

  // Check that the receiver is a heap object.
  __ tst(receiver, Operand(kSmiTagMask));

@@ -7129,7 +7147,8 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type,

  // The deferred code expects value, key and receiver in registers.
  DeferredReferenceSetKeyedValue* deferred =
-     new DeferredReferenceSetKeyedValue(value, key, receiver);
+     new DeferredReferenceSetKeyedValue(
+         value, key, receiver, strict_mode_flag());

  // Check that the value is a smi. As this inlined code does not set the
  // write barrier it is only possible to store smi values.

@@ -7214,7 +7233,7 @@ void CodeGenerator::EmitKeyedStore(StaticType* key_type,

    deferred->BindExit();
  } else {
-   frame()->CallKeyedStoreIC();
+   frame()->CallKeyedStoreIC(strict_mode_flag());
  }
}

deps/v8/src/arm/constants-arm.h | 8 (vendored)

@@ -385,7 +385,10 @@ enum VFPConversionMode {
  kDefaultRoundToZero = 1
};

// This mask does not include the "inexact" or "input denormal" cumulative
// exception flags, because we usually don't want to check for them.
static const uint32_t kVFPExceptionMask = 0xf;
static const uint32_t kVFPInexactExceptionBit = 1 << 4;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
static const uint32_t kVFPInvalidExceptionBit = 1;

@@ -411,6 +414,11 @@ enum VFPRoundingMode {

static const uint32_t kVFPRoundingModeMask = 3 << 22;

enum CheckForInexactConversion {
  kCheckForInexactConversion,
  kDontCheckForInexactConversion
};

// -----------------------------------------------------------------------------
// Hints.

deps/v8/src/arm/full-codegen-arm.cc | 774 (vendored)

(File diff suppressed because it is too large.)

deps/v8/src/arm/ic-arm.cc | 27 (vendored)

@@ -1400,7 +1400,8 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
}


-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                              StrictModeFlag strict_mode) {
  // ---------- S t a t e --------------
  //  -- r0 : value
  //  -- r1 : key

@@ -1411,11 +1412,16 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(r2, r1, r0);

- __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+ __ mov(r1, Operand(Smi::FromInt(NONE)));         // PropertyAttributes
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));  // Strict mode.
+ __ Push(r1, r0);
+
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictModeFlag strict_mode) {
  // ---------- S t a t e --------------
  //  -- r0 : value
  //  -- r1 : key

@@ -1470,7 +1476,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
  // r0: value.
  // r1: key.
  // r2: receiver.
- GenerateRuntimeSetProperty(masm);
+ GenerateRuntimeSetProperty(masm, strict_mode);

  // Check whether the elements is a pixel array.
  // r4: elements map.

@@ -1540,7 +1546,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {


void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                 Code::ExtraICState extra_ic_state) {
+                                 StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- r0 : value
  //  -- r1 : receiver

@@ -1552,7 +1558,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                         NOT_IN_LOOP,
                                         MONOMORPHIC,
-                                        extra_ic_state);
+                                        strict_mode);
  StubCache::GenerateProbe(masm, flags, r1, r2, r3, r4, r5);

  // Cache miss: Jump to runtime.

@@ -1646,7 +1652,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}


-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+                                  StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- r0 : value
  //  -- r1 : receiver

@@ -1656,8 +1663,12 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {

  __ Push(r1, r2, r0);

+ __ mov(r1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
+ __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+ __ Push(r1, r0);
+
  // Do tail-call to runtime routine.
- __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+ __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}

deps/v8/src/arm/lithium-arm.cc | 3 (vendored)

@@ -1154,8 +1154,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
    HInstanceOfKnownGlobal* instr) {
  LInstanceOfKnownGlobal* result =
      new LInstanceOfKnownGlobal(UseFixed(instr->value(), r0), FixedTemp(r4));
- MarkAsSaveDoubles(result);
- return AssignEnvironment(AssignPointerMap(DefineFixed(result, r0)));
+ return MarkAsCall(DefineFixed(result, r0), instr);
}

deps/v8/src/arm/lithium-codegen-arm.cc | 74 (vendored)

@@ -573,7 +573,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  Handle<DeoptimizationInputData> data =
      Factory::NewDeoptimizationInputData(length, TENURED);

- data->SetTranslationByteArray(*translations_.CreateByteArray());
+ Handle<ByteArray> translations = translations_.CreateByteArray();
+ data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =

@@ -1985,11 +1986,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
  __ BlockConstPoolFor(kAdditionalDelta);
  __ mov(temp, Operand(delta * kPointerSize));
  __ StoreToSafepointRegisterSlot(temp, temp);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- ASSERT_EQ(kAdditionalDelta,
-           masm_->InstructionsGeneratedSince(&before_push_delta));
- RecordSafepointWithRegisters(
-     instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  // Put the result value into the result register slot and
  // restore all registers.
  __ StoreToSafepointRegisterSlot(result, result);

@@ -2586,41 +2583,6 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}


-// Truncates a double using a specific rounding mode.
-// Clears the z flag (ne condition) if an overflow occurs.
-void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
-                               SwVfpRegister result,
-                               DwVfpRegister double_input,
-                               Register scratch1,
-                               Register scratch2) {
-  Register prev_fpscr = scratch1;
-  Register scratch = scratch2;
-
-  // Set custom FPSCR:
-  //  - Set rounding mode.
-  //  - Clear vfp cumulative exception flags.
-  //  - Make sure Flush-to-zero mode control bit is unset.
-  __ vmrs(prev_fpscr);
-  __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
-                                      kVFPRoundingModeMask |
-                                      kVFPFlushToZeroMask));
-  __ orr(scratch, scratch, Operand(rounding_mode));
-  __ vmsr(scratch);
-
-  // Convert the argument to an integer.
-  __ vcvt_s32_f64(result,
-                  double_input,
-                  kFPSCRRounding);
-
-  // Retrieve FPSCR.
-  __ vmrs(scratch);
-  // Restore FPSCR.
-  __ vmsr(prev_fpscr);
-  // Check for vfp exceptions.
-  __ tst(scratch, Operand(kVFPExceptionMask));
-}
-
-
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());

@@ -2628,7 +2590,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));

- EmitVFPTruncate(kRoundToMinusInf,
+ __ EmitVFPTruncate(kRoundToMinusInf,
                     single_scratch,
                     input,
                     scratch1,

@@ -2654,7 +2616,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
  Register result = ToRegister(instr->result());
  Register scratch1 = scratch0();
  Register scratch2 = result;
- EmitVFPTruncate(kRoundToNearest,
+ __ EmitVFPTruncate(kRoundToNearest,
                     double_scratch0().low(),
                     input,
                     scratch1,

@@ -2863,8 +2825,8 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {

  // Name is always in r2.
  __ mov(r2, Operand(instr->name()));
- Handle<Code> ic(Builtins::builtin(info_->is_strict()
-                                       ? Builtins::StoreIC_Initialize_Strict
+ Handle<Code> ic(Builtins::builtin(
+     info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
                        : Builtins::StoreIC_Initialize));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

@@ -2907,7 +2869,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  ASSERT(ToRegister(instr->key()).is(r1));
  ASSERT(ToRegister(instr->value()).is(r0));

- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ Handle<Code> ic(Builtins::builtin(
+     info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+                        : Builtins::KeyedStoreIC_Initialize));
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

@@ -3371,21 +3335,26 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));

- VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
-                                                     : kRoundToNearest;
-
- EmitVFPTruncate(rounding_mode,
+ __ EmitVFPTruncate(kRoundToZero,
                     single_scratch,
                     double_input,
                     scratch1,
                     scratch2);

  // Deoptimize if we had a vfp invalid exception.
  DeoptimizeIf(ne, instr->environment());

  // Retrieve the result.
  __ vmov(result_reg, single_scratch);

- if (instr->truncating() &&
-     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+ if (!instr->truncating()) {
+   // Convert result back to double and compare with input
+   // to check if the conversion was exact.
+   __ vmov(single_scratch, result_reg);
+   __ vcvt_f64_s32(double_scratch0(), single_scratch);
+   __ VFPCompareAndSetFlags(double_scratch0(), double_input);
+   DeoptimizeIf(ne, instr->environment());
+   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ cmp(result_reg, Operand(0));
      __ b(ne, &done);

@@ -3397,6 +3366,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
      __ bind(&done);
    }
  }
}
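
The non-truncating path above converts the result back with vcvt_f64_s32 and compares against the original input, deoptimizing unless the conversion round-trips exactly. In scalar form the contract looks roughly like this (hypothetical sketch; the range guard stands in for the VFP invalid-operation check):

#include <cmath>
#include <cstdint>
#include <optional>

// Succeed only if input is exactly an int32, rejecting -0 as well,
// mirroring the checks DoDoubleToI performs before deoptimizing.
std::optional<int32_t> DoubleToInt32Exact(double input) {
  if (!(input >= INT32_MIN && input <= INT32_MAX)) return std::nullopt;
  int32_t result = static_cast<int32_t>(input);  // truncate
  if (static_cast<double>(result) != input) return std::nullopt;  // inexact
  if (result == 0 && std::signbit(input)) return std::nullopt;    // minus zero
  return result;
}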


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {

deps/v8/src/arm/lithium-codegen-arm.h | 5 (vendored)

@@ -206,11 +206,6 @@ class LCodeGen BASE_EMBEDDED {
  // Specific math operations - used from DoUnaryMathOperation.
  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
  void DoMathAbs(LUnaryMathOperation* instr);
- void EmitVFPTruncate(VFPRoundingMode rounding_mode,
-                      SwVfpRegister result,
-                      DwVfpRegister double_input,
-                      Register scratch1,
-                      Register scratch2);
  void DoMathFloor(LUnaryMathOperation* instr);
  void DoMathRound(LUnaryMathOperation* instr);
  void DoMathSqrt(LUnaryMathOperation* instr);

deps/v8/src/arm/macro-assembler-arm.cc | 125 (vendored)

@@ -271,6 +271,29 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
}


void MacroAssembler::Bfi(Register dst,
                         Register src,
                         Register scratch,
                         int lsb,
                         int width,
                         Condition cond) {
  ASSERT(0 <= lsb && lsb < 32);
  ASSERT(0 <= width && width < 32);
  ASSERT(lsb + width < 32);
  ASSERT(!scratch.is(dst));
  if (width == 0) return;
  if (!CpuFeatures::IsSupported(ARMv7)) {
    int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
    bic(dst, dst, Operand(mask));
    and_(scratch, src, Operand((1 << width) - 1));
    mov(scratch, Operand(scratch, LSL, lsb));
    orr(dst, dst, scratch);
  } else {
    bfi(dst, src, lsb, width, cond);
  }
}
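
The non-ARMv7 branch above is the classic mask/shift/or bit-field insert. A plain C++ sketch of the same computation (hypothetical helper; assumes 0 < width and lsb + width < 32, matching the ASSERTs):

#include <cstdint>

// Insert the low `width` bits of src into dst at bit position `lsb`,
// leaving the remaining bits of dst intact - what ARMv7's bfi does in
// one instruction and the fallback does with bic/and/mov/orr.
uint32_t BitFieldInsert(uint32_t dst, uint32_t src, int lsb, int width) {
  uint32_t field_mask = ((uint32_t{1} << width) - 1) << lsb;
  return (dst & ~field_mask) | ((src << lsb) & field_mask);
}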


void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond) {
  ASSERT(lsb < 32);
  if (!CpuFeatures::IsSupported(ARMv7)) {

@@ -1618,7 +1641,7 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {


MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
-   ApiFunction* function, int stack_space) {
+   ExternalReference function, int stack_space) {
  ExternalReference next_address =
      ExternalReference::handle_scope_next_address();
  const int kNextOffset = 0;

@@ -1883,6 +1906,52 @@ void MacroAssembler::ConvertToInt32(Register source,
}


void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
                                     SwVfpRegister result,
                                     DwVfpRegister double_input,
                                     Register scratch1,
                                     Register scratch2,
                                     CheckForInexactConversion check_inexact) {
  ASSERT(CpuFeatures::IsSupported(VFP3));
  CpuFeatures::Scope scope(VFP3);
  Register prev_fpscr = scratch1;
  Register scratch = scratch2;

  int32_t check_inexact_conversion =
      (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit
                                                    : 0;

  // Set custom FPSCR:
  //  - Set rounding mode.
  //  - Clear vfp cumulative exception flags.
  //  - Make sure Flush-to-zero mode control bit is unset.
  vmrs(prev_fpscr);
  bic(scratch,
      prev_fpscr,
      Operand(kVFPExceptionMask |
              check_inexact_conversion |
              kVFPRoundingModeMask |
              kVFPFlushToZeroMask));
  // 'Round To Nearest' is encoded by 0b00 so no bits need to be set.
  if (rounding_mode != kRoundToNearest) {
    orr(scratch, scratch, Operand(rounding_mode));
  }
  vmsr(scratch);

  // Convert the argument to an integer.
  vcvt_s32_f64(result,
               double_input,
               (rounding_mode == kRoundToZero) ? kDefaultRoundToZero
                                               : kFPSCRRounding);

  // Retrieve FPSCR.
  vmrs(scratch);
  // Restore FPSCR.
  vmsr(prev_fpscr);
  // Check for vfp exceptions.
  tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion));
}
|
||||
|
||||
|
||||
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
|
||||
Register src,
|
||||
int num_least_bits) {
|
||||
@ -2389,6 +2458,60 @@ void MacroAssembler::CopyFields(Register dst,
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CopyBytes(Register src,
|
||||
Register dst,
|
||||
Register length,
|
||||
Register scratch) {
|
||||
Label align_loop, align_loop_1, word_loop, byte_loop, byte_loop_1, done;
|
||||
|
||||
// Align src before copying in word size chunks.
|
||||
bind(&align_loop);
|
||||
cmp(length, Operand(0));
|
||||
b(eq, &done);
|
||||
bind(&align_loop_1);
|
||||
tst(src, Operand(kPointerSize - 1));
|
||||
b(eq, &word_loop);
|
||||
ldrb(scratch, MemOperand(src, 1, PostIndex));
|
||||
strb(scratch, MemOperand(dst, 1, PostIndex));
|
||||
sub(length, length, Operand(1), SetCC);
|
||||
b(ne, &byte_loop_1);
|
||||
|
||||
// Copy bytes in word size chunks.
|
||||
bind(&word_loop);
|
||||
if (FLAG_debug_code) {
|
||||
tst(src, Operand(kPointerSize - 1));
|
||||
Assert(eq, "Expecting alignment for CopyBytes");
|
||||
}
|
||||
cmp(length, Operand(kPointerSize));
|
||||
b(lt, &byte_loop);
|
||||
ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
|
||||
#if CAN_USE_UNALIGNED_ACCESSES
|
||||
str(scratch, MemOperand(dst, kPointerSize, PostIndex));
|
||||
#else
|
||||
strb(scratch, MemOperand(dst, 1, PostIndex));
|
||||
mov(scratch, Operand(scratch, LSR, 8));
|
||||
strb(scratch, MemOperand(dst, 1, PostIndex));
|
||||
mov(scratch, Operand(scratch, LSR, 8));
|
||||
strb(scratch, MemOperand(dst, 1, PostIndex));
|
||||
mov(scratch, Operand(scratch, LSR, 8));
|
||||
strb(scratch, MemOperand(dst, 1, PostIndex));
|
||||
#endif
|
||||
sub(length, length, Operand(kPointerSize));
|
||||
b(&word_loop);
|
||||
|
||||
// Copy the last bytes if any left.
|
||||
bind(&byte_loop);
|
||||
cmp(length, Operand(0));
|
||||
b(eq, &done);
|
||||
bind(&byte_loop_1);
|
||||
ldrb(scratch, MemOperand(src, 1, PostIndex));
|
||||
strb(scratch, MemOperand(dst, 1, PostIndex));
|
||||
sub(length, length, Operand(1), SetCC);
|
||||
b(ne, &byte_loop_1);
|
||||
bind(&done);
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::CountLeadingZeros(Register zeros, // Answer.
|
||||
Register source, // Input.
|
||||
Register scratch) {
|
||||
|
deps/v8/src/arm/macro-assembler-arm.h | 51 (vendored)
@@ -121,6 +121,15 @@ class MacroAssembler: public Assembler {
             Condition cond = al);
   void Sbfx(Register dst, Register src, int lsb, int width,
             Condition cond = al);
+  // The scratch register is not used for ARMv7.
+  // scratch can be the same register as src (in which case it is trashed), but
+  // not the same as dst.
+  void Bfi(Register dst,
+           Register src,
+           Register scratch,
+           int lsb,
+           int width,
+           Condition cond = al);
   void Bfc(Register dst, int lsb, int width, Condition cond = al);
   void Usat(Register dst, int satpos, const Operand& src,
             Condition cond = al);
@@ -234,6 +243,17 @@ class MacroAssembler: public Assembler {
     }
   }
 
+  // Pop two registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    if (src1.code() > src2.code()) {
+      ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
+    } else {
+      ldr(src2, MemOperand(sp, 4, PostIndex), cond);
+      ldr(src1, MemOperand(sp, 4, PostIndex), cond);
+    }
+  }
+
   // Push and pop the registers that can hold pointers, as defined by the
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
@@ -497,6 +517,14 @@ class MacroAssembler: public Assembler {
   // Copies a fixed number of fields of heap objects from src to dst.
   void CopyFields(Register dst, Register src, RegList temps, int field_count);
 
+  // Copies a number of bytes from src to dst. All registers are clobbered. On
+  // exit src and dst will point to the place just after where the last byte was
+  // read or written and length will be zero.
+  void CopyBytes(Register src,
+                 Register dst,
+                 Register length,
+                 Register scratch);
+
   // ---------------------------------------------------------------------------
   // Support functions.
 
@@ -613,6 +641,19 @@ class MacroAssembler: public Assembler {
                      DwVfpRegister double_scratch,
                      Label *not_int32);
 
+  // Truncates a double using a specific rounding mode.
+  // Clears the z flag (ne condition) if an overflow occurs.
+  // If exact_conversion is true, the z flag is also cleared if the conversion
+  // was inexact, ie. if the double value could not be converted exactly
+  // to a 32bit integer.
+  void EmitVFPTruncate(VFPRoundingMode rounding_mode,
+                       SwVfpRegister result,
+                       DwVfpRegister double_input,
+                       Register scratch1,
+                       Register scratch2,
+                       CheckForInexactConversion check
+                           = kDontCheckForInexactConversion);
+
   // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
   // instruction. On pre-ARM5 hardware this routine gives the wrong answer
   // for 0 (31 instead of 32). Source and scratch can be the same in which case
@@ -690,7 +731,7 @@ class MacroAssembler: public Assembler {
   // from handle and propagates exceptions. Restores context.
   // stack_space - space to be unwound on exit (includes the call js
   // arguments space and the additional space allocated for the fast call).
-  MaybeObject* TryCallApiFunctionAndReturn(ApiFunction* function,
+  MaybeObject* TryCallApiFunctionAndReturn(ExternalReference function,
                                            int stack_space);
 
   // Jump to a runtime routine.
@@ -777,11 +818,11 @@ class MacroAssembler: public Assembler {
     mov(reg, scratch);
   }
 
-  void SmiUntag(Register reg) {
-    mov(reg, Operand(reg, ASR, kSmiTagSize));
+  void SmiUntag(Register reg, SBit s = LeaveCC) {
+    mov(reg, Operand(reg, ASR, kSmiTagSize), s);
   }
-  void SmiUntag(Register dst, Register src) {
-    mov(dst, Operand(src, ASR, kSmiTagSize));
+  void SmiUntag(Register dst, Register src, SBit s = LeaveCC) {
+    mov(dst, Operand(src, ASR, kSmiTagSize), s);
   }
 
   // Jump the register contains a smi.
deps/v8/src/arm/simulator-arm.cc | 65 (vendored)
@@ -1005,7 +1005,9 @@ int Simulator::ReadW(int32_t addr, Instruction* instr) {
     intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   UNIMPLEMENTED();
   return 0;
 #endif
@@ -1023,7 +1025,9 @@ void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
     *ptr = value;
     return;
   }
-  PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   UNIMPLEMENTED();
 #endif
 }
@@ -1038,7 +1042,9 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
     uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
     return *ptr;
   }
-  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   UNIMPLEMENTED();
   return 0;
 #endif
@@ -1072,7 +1078,9 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
     *ptr = value;
     return;
   }
-  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   UNIMPLEMENTED();
 #endif
 }
@@ -1089,7 +1097,9 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
     *ptr = value;
     return;
   }
-  PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
+  PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
   UNIMPLEMENTED();
 #endif
 }
@@ -1531,7 +1541,11 @@ typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
 
 // This signature supports direct call in to API function native callback
 // (refer to InvocationCallback in v8.h).
-typedef v8::Handle<v8::Value> (*SimulatorRuntimeApiCall)(int32_t arg0);
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectApiCall)(int32_t arg0);
+
+// This signature supports direct call to accessor getter callback.
+typedef v8::Handle<v8::Value> (*SimulatorRuntimeDirectGetterCall)(int32_t arg0,
+                                                                  int32_t arg1);
 
 // Software interrupt instructions are used by the simulator to call into the
 // C-based V8 runtime.
@@ -1572,14 +1586,12 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
       CHECK(stack_aligned);
       double result = target(arg0, arg1, arg2, arg3);
       SetFpResult(result);
-    } else if (redirection->type() == ExternalReference::DIRECT_CALL) {
-      SimulatorRuntimeApiCall target =
-          reinterpret_cast<SimulatorRuntimeApiCall>(external);
+    } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+      SimulatorRuntimeDirectApiCall target =
+          reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
       if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
-        PrintF(
-            "Call to host function at %p args %08x",
-            FUNCTION_ADDR(target),
-            arg0);
+        PrintF("Call to host function at %p args %08x",
+               FUNCTION_ADDR(target), arg0);
         if (!stack_aligned) {
           PrintF(" with unaligned stack %08x\n", get_register(sp));
         }
@@ -1591,6 +1603,23 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
         PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
       }
       set_register(r0, (int32_t) *result);
+    } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+      SimulatorRuntimeDirectGetterCall target =
+          reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+      if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+        PrintF("Call to host function at %p args %08x %08x",
+               FUNCTION_ADDR(target), arg0, arg1);
+        if (!stack_aligned) {
+          PrintF(" with unaligned stack %08x\n", get_register(sp));
+        }
+        PrintF("\n");
+      }
+      CHECK(stack_aligned);
+      v8::Handle<v8::Value> result = target(arg0, arg1);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Returned %p\n", reinterpret_cast<void *>(*result));
+      }
+      set_register(r0, (int32_t) *result);
     } else {
       // builtin call.
       ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
@@ -2535,6 +2564,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
         double dn_value = get_double_from_d_register(vn);
         double dm_value = get_double_from_d_register(vm);
         double dd_value = dn_value / dm_value;
+        div_zero_vfp_flag_ = (dm_value == 0);
         set_d_register_from_double(vd, dd_value);
       } else {
         UNIMPLEMENTED();  // Not used by V8.
@@ -2769,14 +2799,17 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
 
     inv_op_vfp_flag_ = get_inv_op_vfp_flag(mode, val, unsigned_integer);
 
+    double abs_diff =
+        unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
+                         : fabs(val - temp);
+
+    inexact_vfp_flag_ = (abs_diff != 0);
+
     if (inv_op_vfp_flag_) {
       temp = VFPConversionSaturate(val, unsigned_integer);
     } else {
       switch (mode) {
         case RN: {
-          double abs_diff =
-              unsigned_integer ? fabs(val - static_cast<uint32_t>(temp))
-                               : fabs(val - temp);
           int val_sign = (val > 0) ? 1 : -1;
           if (abs_diff > 0.5) {
             temp += val_sign;
deps/v8/src/arm/stub-cache-arm.cc | 62 (vendored)
@@ -655,12 +655,10 @@ static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
   // already generated). Do not allow the assembler to perform a
   // garbage collection but instead return the allocation failure
   // object.
-  MaybeObject* result = masm->TryCallApiFunctionAndReturn(
-      &fun, argc + kFastApiCallArguments + 1);
-  if (result->IsFailure()) {
-    return result;
-  }
-  return Heap::undefined_value();
+  const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
+  ExternalReference ref =
+      ExternalReference(&fun, ExternalReference::DIRECT_API_CALL);
+  return masm->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 class CallInterceptorCompiler BASE_EMBEDDED {
@@ -1245,18 +1243,38 @@ MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
   CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
                   name, miss);
 
-  // Push the arguments on the JS stack of the caller.
-  __ push(receiver);  // Receiver.
-  __ mov(scratch3, Operand(Handle<AccessorInfo>(callback)));  // callback data
-  __ ldr(ip, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
-  __ Push(reg, ip, scratch3, name_reg);
+  // Build AccessorInfo::args_ list on the stack and push property name below
+  // the exit frame to make GC aware of them and store pointers to them.
+  __ push(receiver);
+  __ mov(scratch2, sp);  // scratch2 = AccessorInfo::args_
+  Handle<AccessorInfo> callback_handle(callback);
+  if (Heap::InNewSpace(callback_handle->data())) {
+    __ Move(scratch3, callback_handle);
+    __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
+  } else {
+    __ Move(scratch3, Handle<Object>(callback_handle->data()));
+  }
+  __ Push(reg, scratch3, name_reg);
+  __ mov(r0, sp);  // r0 = Handle<String>
 
-  // Do tail-call to the runtime system.
-  ExternalReference load_callback_property =
-      ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
-  __ TailCallExternalReference(load_callback_property, 5, 1);
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
 
-  return Heap::undefined_value();  // Success.
+  const int kApiStackSpace = 1;
+  __ EnterExitFrame(false, kApiStackSpace);
+  // Create AccessorInfo instance on the stack above the exit frame with
+  // scratch2 (internal::Object **args_) as the data.
+  __ str(scratch2, MemOperand(sp, 1 * kPointerSize));
+  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo&
+
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated). Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  const int kStackUnwindSpace = 4;
+  ExternalReference ref =
+      ExternalReference(&fun, ExternalReference::DIRECT_GETTER_CALL);
+  return masm()->TryCallApiFunctionAndReturn(ref, kStackUnwindSpace);
 }
 
 
@@ -2653,10 +2671,13 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
 
   __ Push(r1, r2, r0);  // Receiver, name, value.
 
+  __ mov(r0, Operand(Smi::FromInt(strict_mode_)));
+  __ push(r0);  // strict mode
+
   // Do tail-call to the runtime system.
   ExternalReference store_ic_property =
       ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
-  __ TailCallExternalReference(store_ic_property, 3, 1);
+  __ TailCallExternalReference(store_ic_property, 4, 1);
 
   // Handle store cache miss.
   __ bind(&miss);
@@ -4038,7 +4059,12 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
   // Push receiver, key and value for runtime call.
   __ Push(r2, r1, r0);
 
-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+  __ mov(r1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
+  __ mov(r0, Operand(Smi::FromInt(
+      Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
+  __ Push(r1, r0);
+
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 
   return GetCode(flags);
 }
deps/v8/src/arm/virtual-frame-arm.cc | 10 (vendored)
@@ -332,8 +332,8 @@ void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
 void VirtualFrame::CallStoreIC(Handle<String> name,
                                bool is_contextual,
                                StrictModeFlag strict_mode) {
-  Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
-      ? Builtins::StoreIC_Initialize_Strict
+  Handle<Code> ic(Builtins::builtin(
+      (strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
                                    : Builtins::StoreIC_Initialize));
   PopToR0();
   RelocInfo::Mode mode;
@@ -359,8 +359,10 @@ void VirtualFrame::CallKeyedLoadIC() {
 }
 
 
-void VirtualFrame::CallKeyedStoreIC() {
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
+  Handle<Code> ic(Builtins::builtin(
+      (strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
+                                   : Builtins::KeyedStoreIC_Initialize));
   PopToR1R0();
   SpillAll();
   EmitPop(r2);
deps/v8/src/arm/virtual-frame-arm.h | 2 (vendored)
@@ -303,7 +303,7 @@ class VirtualFrame : public ZoneObject {
 
   // Call keyed store IC. Value, key and receiver are on the stack. All three
   // are consumed. Result is returned in r0.
-  void CallKeyedStoreIC();
+  void CallKeyedStoreIC(StrictModeFlag strict_mode);
 
   // Call into an IC stub given the number of arguments it removes
   // from the stack. Register arguments to the IC stub are implicit,
deps/v8/src/array.js | 9 (vendored)
@@ -418,7 +418,6 @@ function ArrayPush() {
 
 
 function ArrayConcat(arg1) {  // length == 1
-  // TODO: can we just use arguments?
   var arg_count = %_ArgumentsLength();
   var arrays = new $Array(1 + arg_count);
   arrays[0] = this;
@@ -1018,13 +1017,13 @@ function ArrayIndexOf(element, index) {
   }
   var min = index;
   var max = length;
-  if (UseSparseVariant(this, length, true)) {
+  if (UseSparseVariant(this, length, IS_ARRAY(this))) {
     var intervals = %GetArrayKeys(this, length);
     if (intervals.length == 2 && intervals[0] < 0) {
       // A single interval.
       var intervalMin = -(intervals[0] + 1);
       var intervalMax = intervalMin + intervals[1];
-      min = MAX(min, intervalMin);
+      if (min < intervalMin) min = intervalMin;
       max = intervalMax;  // Capped by length already.
       // Fall through to loop below.
     } else {
@@ -1074,13 +1073,13 @@ function ArrayLastIndexOf(element, index) {
   }
   var min = 0;
   var max = index;
-  if (UseSparseVariant(this, length, true)) {
+  if (UseSparseVariant(this, length, IS_ARRAY(this))) {
     var intervals = %GetArrayKeys(this, index + 1);
     if (intervals.length == 2 && intervals[0] < 0) {
       // A single interval.
       var intervalMin = -(intervals[0] + 1);
       var intervalMax = intervalMin + intervals[1];
-      min = MAX(min, intervalMin);
+      if (min < intervalMin) min = intervalMin;
       max = intervalMax;  // Capped by index already.
       // Fall through to loop below.
     } else {
deps/v8/src/assembler.cc | 2 (vendored)
@@ -252,7 +252,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
     WriteExtraTaggedPC(pc_delta, kPCJumpTag);
     WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
     last_data_ = rinfo->data();
-    ASSERT(begin_pos - pos_ == RelocInfo::kRelocCommentSize);
+    ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
   } else {
     // For all other modes we simply use the mode as the extra tag.
     // None of these modes need a data component.
deps/v8/src/assembler.h | 31 (vendored)
@@ -184,10 +184,10 @@ class RelocInfo BASE_EMBEDDED {
   // we do not normally record relocation info.
   static const char* kFillerCommentString;
 
-  // The size of a comment is equal to tree bytes for the extra tagged pc +
-  // the tag for the data, and kPointerSize for the actual pointer to the
+  // The minimum size of a comment is equal to three bytes for the extra tagged
+  // pc + the tag for the data, and kPointerSize for the actual pointer to the
   // comment.
-  static const int kRelocCommentSize = 3 + kPointerSize;
+  static const int kMinRelocCommentSize = 3 + kPointerSize;
 
   // The maximum size for a call instruction including pc-jump.
   static const int kMaxCallSize = 6;
@@ -481,21 +481,22 @@ class Debug_Address;
 class ExternalReference BASE_EMBEDDED {
  public:
   // Used in the simulator to support different native api calls.
-  //
-  // BUILTIN_CALL - builtin call.
-  // MaybeObject* f(v8::internal::Arguments).
-  //
-  // FP_RETURN_CALL - builtin call that returns floating point.
-  // double f(double, double).
-  //
-  // DIRECT_CALL - direct call to API function native callback
-  // from generated code.
-  // Handle<Value> f(v8::Arguments&)
-  //
   enum Type {
+    // Builtin call.
+    // MaybeObject* f(v8::internal::Arguments).
     BUILTIN_CALL,  // default
+
+    // Builtin call that returns floating point.
+    // double f(double, double).
     FP_RETURN_CALL,
-    DIRECT_CALL
+
+    // Direct call to API function callback.
+    // Handle<Value> f(v8::Arguments&)
+    DIRECT_API_CALL,
+
+    // Direct call to accessor getter callback.
+    // Handle<value> f(Local<String> property, AccessorInfo& info)
+    DIRECT_GETTER_CALL
   };
 
   typedef void* ExternalReferenceRedirector(void* original, Type type);
deps/v8/src/builtins.cc | 20 (vendored)
@@ -1328,12 +1328,12 @@ static void Generate_StoreIC_Normal_Strict(MacroAssembler* masm) {
 
 
 static void Generate_StoreIC_Megamorphic(MacroAssembler* masm) {
-  StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICNonStrict);
+  StoreIC::GenerateMegamorphic(masm, kNonStrictMode);
 }
 
 
 static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) {
-  StoreIC::GenerateMegamorphic(masm, StoreIC::kStoreICStrict);
+  StoreIC::GenerateMegamorphic(masm, kStrictMode);
 }
 
 
@@ -1348,17 +1348,22 @@ static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) {
 
 
 static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) {
-  StoreIC::GenerateGlobalProxy(masm);
+  StoreIC::GenerateGlobalProxy(masm, kNonStrictMode);
 }
 
 
 static void Generate_StoreIC_GlobalProxy_Strict(MacroAssembler* masm) {
-  StoreIC::GenerateGlobalProxy(masm);
+  StoreIC::GenerateGlobalProxy(masm, kStrictMode);
 }
 
 
 static void Generate_KeyedStoreIC_Generic(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateGeneric(masm);
+  KeyedStoreIC::GenerateGeneric(masm, kNonStrictMode);
 }
 
 
+static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateGeneric(masm, kStrictMode);
+}
+
+
@@ -1372,6 +1377,11 @@ static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
 }
 
 
+static void Generate_KeyedStoreIC_Initialize_Strict(MacroAssembler* masm) {
+  KeyedStoreIC::GenerateInitialize(masm);
+}
+
+
 #ifdef ENABLE_DEBUGGER_SUPPORT
 static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
   Debug::GenerateLoadICDebugBreak(masm);
deps/v8/src/builtins.h | 15 (vendored)
@@ -136,21 +136,26 @@ enum BuiltinExtraArguments {
   V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
     Code::kNoExtraICState) \
   V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
-    StoreIC::kStoreICStrict) \
+    kStrictMode) \
   V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
-    StoreIC::kStoreICStrict) \
+    kStrictMode) \
   V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
-    StoreIC::kStoreICStrict) \
+    kStrictMode) \
   V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
-    StoreIC::kStoreICStrict) \
+    kStrictMode) \
   V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
-    StoreIC::kStoreICStrict) \
+    kStrictMode) \
   \
   V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
     Code::kNoExtraICState) \
   V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
     Code::kNoExtraICState) \
   \
+  V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
+    kStrictMode) \
+  V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
+    kStrictMode) \
+  \
   /* Uses KeyedLoadIC_Initialize; must be after in list. */ \
   V(FunctionCall, BUILTIN, UNINITIALIZED, \
     Code::kNoExtraICState) \
deps/v8/src/compiler.cc | 7 (vendored)
@@ -221,11 +221,12 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   // or perform on-stack replacement for function with too many
   // stack-allocated local variables.
   //
-  // The encoding is as a signed value, with parameters using the negative
-  // indices and locals the non-negative ones.
+  // The encoding is as a signed value, with parameters and receiver using
+  // the negative indices and locals the non-negative ones.
   const int limit = LUnallocated::kMaxFixedIndices / 2;
   Scope* scope = info->scope();
-  if (scope->num_parameters() > limit || scope->num_stack_slots() > limit) {
+  if ((scope->num_parameters() + 1) > limit ||
+      scope->num_stack_slots() > limit) {
     AbortAndDisable(info);
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
deps/v8/src/d8.cc | 8 (vendored)
@@ -405,7 +405,7 @@ void Shell::AddHistogramSample(void* histogram, int sample) {
 void Shell::Initialize() {
   Shell::counter_map_ = new CounterMap();
   // Set up counters
-  if (i::FLAG_map_counters != NULL)
+  if (i::StrLength(i::FLAG_map_counters) != 0)
     MapCounters(i::FLAG_map_counters);
   if (i::FLAG_dump_counters) {
     V8::SetCounterFunction(LookupCounter);
@@ -425,6 +425,12 @@ void Shell::Initialize() {
   global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
   global_template->Set(String::New("version"), FunctionTemplate::New(Version));
 
+#ifdef LIVE_OBJECT_LIST
+  global_template->Set(String::New("lol_is_enabled"), Boolean::New(true));
+#else
+  global_template->Set(String::New("lol_is_enabled"), Boolean::New(false));
+#endif
+
   Handle<ObjectTemplate> os_templ = ObjectTemplate::New();
   AddOSMethods(os_templ);
   global_template->Set(String::New("os"), os_templ);
deps/v8/src/d8.js | 618 (vendored)
@@ -117,6 +117,10 @@ Debug.State = {
 var trace_compile = false;  // Tracing all compile events?
 var trace_debug_json = false;  // Tracing all debug json packets?
 var last_cmd_line = '';
+//var lol_is_enabled;  // Set to true in d8.cc if LIVE_OBJECT_LIST is defined.
+var lol_next_dump_index = 0;
+const kDefaultLolLinesToPrintAtATime = 10;
+const kMaxLolLinesToPrintAtATime = 1000;
 var repeat_cmd_line = '';
 var is_running = true;
 
@@ -495,6 +499,13 @@ function DebugRequest(cmd_line) {
       this.request_ = void 0;
       break;
 
+    case 'liveobjectlist':
+    case 'lol':
+      if (lol_is_enabled) {
+        this.request_ = this.lolToJSONRequest_(args, is_repeating);
+        break;
+      }
+
     default:
       throw new Error('Unknown command "' + cmd + '"');
   }
@@ -539,10 +550,54 @@ DebugRequest.prototype.createRequest = function(command) {
 };
 
 
+// Note: we use detected command repetition as a signal for continuation here.
+DebugRequest.prototype.createLOLRequest = function(command,
+                                                   start_index,
+                                                   lines_to_dump,
+                                                   is_continuation) {
+  if (is_continuation) {
+    start_index = lol_next_dump_index;
+  }
+
+  if (lines_to_dump) {
+    lines_to_dump = parseInt(lines_to_dump);
+  } else {
+    lines_to_dump = kDefaultLolLinesToPrintAtATime;
+  }
+  if (lines_to_dump > kMaxLolLinesToPrintAtATime) {
+    lines_to_dump = kMaxLolLinesToPrintAtATime;
+  }
+
+  // Save the next start_index to dump from:
+  lol_next_dump_index = start_index + lines_to_dump;
+
+  var request = this.createRequest(command);
+  request.arguments = {};
+  request.arguments.start = start_index;
+  request.arguments.count = lines_to_dump;
+
+  return request;
+};
+
+
 // Create a JSON request for the evaluation command.
 DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) {
   // Global varaible used to store whether a handle was requested.
   lookup_handle = null;
+
+  if (lol_is_enabled) {
+    // Check if the expression is a obj id in the form @<obj id>.
+    var obj_id_match = expression.match(/^@([0-9]+)$/);
+    if (obj_id_match) {
+      var obj_id = parseInt(obj_id_match[1]);
+      // Build a dump request.
+      var request = this.createRequest('getobj');
+      request.arguments = {};
+      request.arguments.obj_id = obj_id;
+      return request.toJSONProtocol();
+    }
+  }
+
   // Check if the expression is a handle id in the form #<handle>#.
   var handle_match = expression.match(/^#([0-9]*)#$/);
   if (handle_match) {
@@ -1103,6 +1158,10 @@ DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) {
     // Build a evaluate request from the text command.
     request = this.createRequest('frame');
     last_cmd = 'info args';
+  } else if (lol_is_enabled &&
+             args && (args == 'liveobjectlist' || args == 'lol')) {
+    // Build a evaluate request from the text command.
+    return this.liveObjectListToJSONRequest_(null);
   } else {
     throw new Error('Invalid info arguments.');
   }
@@ -1153,6 +1212,262 @@ DebugRequest.prototype.gcToJSONRequest_ = function(args) {
 };
 
 
+// Args: [v[erbose]] [<N>] [i[ndex] <i>] [t[ype] <type>] [sp[ace] <space>]
+DebugRequest.prototype.lolMakeListRequest =
+    function(cmd, args, first_arg_index, is_repeating) {
+
+  var request;
+  var start_index = 0;
+  var dump_limit = void 0;
+  var type_filter = void 0;
+  var space_filter = void 0;
+  var prop_filter = void 0;
+  var is_verbose = false;
+  var i;
+
+  for (i = first_arg_index; i < args.length; i++) {
+    var arg = args[i];
+    // Check for [v[erbose]]:
+    if (arg === 'verbose' || arg === 'v') {
+      // Nothing to do.  This is already implied by args.length > 3.
+      is_verbose = true;
+
+    // Check for [<N>]:
+    } else if (arg.match(/^[0-9]+$/)) {
+      dump_limit = arg;
+      is_verbose = true;
+
+    // Check for i[ndex] <i>:
+    } else if (arg === 'index' || arg === 'i') {
+      i++;
+      if (args.length < i) {
+        throw new Error('Missing index after ' + arg + '.');
+      }
+      start_index = parseInt(args[i]);
+      // The user input start index starts at 1:
+      if (start_index <= 0) {
+        throw new Error('Invalid index ' + args[i] + '.');
+      }
+      start_index -= 1;
+      is_verbose = true;
+
+    // Check for t[ype] <type>:
+    } else if (arg === 'type' || arg === 't') {
+      i++;
+      if (args.length < i) {
+        throw new Error('Missing type after ' + arg + '.');
+      }
+      type_filter = args[i];
+
+    // Check for space <heap space name>:
+    } else if (arg === 'space' || arg === 'sp') {
+      i++;
+      if (args.length < i) {
+        throw new Error('Missing space name after ' + arg + '.');
+      }
+      space_filter = args[i];
+
+    // Check for property <prop name>:
+    } else if (arg === 'property' || arg === 'prop') {
+      i++;
+      if (args.length < i) {
+        throw new Error('Missing property name after ' + arg + '.');
+      }
+      prop_filter = args[i];
+
+    } else {
+      throw new Error('Unknown args at ' + arg + '.');
+    }
+  }
+
+  // Build the verbose request:
+  if (is_verbose) {
+    request = this.createLOLRequest('lol-'+cmd,
+                                    start_index,
+                                    dump_limit,
+                                    is_repeating);
+    request.arguments.verbose = true;
+  } else {
+    request = this.createRequest('lol-'+cmd);
+    request.arguments = {};
+  }
+
+  request.arguments.filter = {};
+  if (type_filter) {
+    request.arguments.filter.type = type_filter;
+  }
+  if (space_filter) {
+    request.arguments.filter.space = space_filter;
+  }
+  if (prop_filter) {
+    request.arguments.filter.prop = prop_filter;
+  }
+
+  return request;
+}
+
+
+function extractObjId(args) {
+  var id = args;
+  id = id.match(/^@([0-9]+)$/);
+  if (id) {
+    id = id[1];
+  } else {
+    throw new Error('Invalid obj id ' + args + '.');
+  }
+  return parseInt(id);
+}
+
+
+DebugRequest.prototype.lolToJSONRequest_ = function(args, is_repeating) {
+  var request;
+  // Use default command if one is not specified:
+  if (!args) {
+    args = 'info';
+  }
+
+  var orig_args = args;
+  var first_arg_index;
+
+  var arg, i;
+  var args = args.split(/\s+/g);
+  var cmd = args[0];
+  var id;
+
+  // Command: <id> [v[erbose]] ...
+  if (cmd.match(/^[0-9]+$/)) {
+    // Convert to the padded list command:
+    // Command: l[ist] <dummy> <id> [v[erbose]] ...
+
+    // Insert the implicit 'list' in front and process as normal:
+    cmd = 'list';
+    args.unshift(cmd);
+  }
+
+  switch(cmd) {
+    // Command: c[apture]
+    case 'capture':
+    case 'c':
+      request = this.createRequest('lol-capture');
+      break;
+
+    // Command: clear|d[elete] <id>|all
+    case 'clear':
+    case 'delete':
+    case 'del': {
+      if (args.length < 2) {
+        throw new Error('Missing argument after ' + cmd + '.');
+      } else if (args.length > 2) {
+        throw new Error('Too many arguments after ' + cmd + '.');
+      }
+      id = args[1];
+      if (id.match(/^[0-9]+$/)) {
+        // Delete a specific lol record:
+        request = this.createRequest('lol-delete');
+        request.arguments = {};
+        request.arguments.id = parseInt(id);
+      } else if (id === 'all') {
+        // Delete all:
+        request = this.createRequest('lol-reset');
+      } else {
+        throw new Error('Invalid argument after ' + cmd + '.');
+      }
+      break;
+    }
+
+    // Command: diff <id1> <id2> [<dump options>]
+    case 'diff':
+      first_arg_index = 3;
+
+    // Command: list <dummy> <id> [<dump options>]
+    case 'list':
+
+    // Command: ret[ainers] <obj id> [<dump options>]
+    case 'retainers':
+    case 'ret':
+    case 'retaining-paths':
+    case 'rp': {
+      if (cmd === 'ret') cmd = 'retainers';
+      else if (cmd === 'rp') cmd = 'retaining-paths';
+
+      if (!first_arg_index) first_arg_index = 2;
+
+      if (args.length < first_arg_index) {
+        throw new Error('Too few arguments after ' + cmd + '.');
+      }
+
+      var request_cmd = (cmd === 'list') ? 'diff':cmd;
+      request = this.lolMakeListRequest(request_cmd,
+                                        args,
+                                        first_arg_index,
+                                        is_repeating);
+
+      if (cmd === 'diff') {
+        request.arguments.id1 = parseInt(args[1]);
+        request.arguments.id2 = parseInt(args[2]);
+      } else if (cmd == 'list') {
+        request.arguments.id1 = 0;
+        request.arguments.id2 = parseInt(args[1]);
+      } else {
+        request.arguments.id = extractObjId(args[1]);
+      }
+      break;
+    }
+
+    // Command: getid
+    case 'getid': {
+      request = this.createRequest('lol-getid');
+      request.arguments = {};
+      request.arguments.address = args[1];
+      break;
+    }
+
+    // Command: inf[o] [<N>]
+    case 'info':
+    case 'inf': {
+      if (args.length > 2) {
+        throw new Error('Too many arguments after ' + cmd + '.');
+      }
+      // Built the info request:
+      request = this.createLOLRequest('lol-info', 0, args[1], is_repeating);
+      break;
+    }
+
+    // Command: path <obj id 1> <obj id 2>
+    case 'path': {
+      request = this.createRequest('lol-path');
+      request.arguments = {};
+      if (args.length > 2) {
+        request.arguments.id1 = extractObjId(args[1]);
+        request.arguments.id2 = extractObjId(args[2]);
+      } else {
+        request.arguments.id1 = 0;
+        request.arguments.id2 = extractObjId(args[1]);
+      }
+      break;
+    }
+
+    // Command: print
+    case 'print': {
+      request = this.createRequest('lol-print');
+      request.arguments = {};
+      request.arguments.id = extractObjId(args[1]);
+      break;
+    }
+
+    // Command: reset
+    case 'reset': {
+      request = this.createRequest('lol-reset');
+      break;
+    }
+
+    default:
+      throw new Error('Invalid arguments.');
+  }
+  return request.toJSONProtocol();
+};
+
+
 // Create a JSON request for the threads command.
 DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) {
   // Build a threads request from the text command.
@@ -1239,6 +1554,49 @@ DebugRequest.prototype.helpCommand_ = function(args) {
   print('');
   print('gc - runs the garbage collector');
   print('');
+
+  if (lol_is_enabled) {
+    print('liveobjectlist|lol <command> - live object list tracking.');
+    print('  where <command> can be:');
+    print('  c[apture] - captures a LOL list.');
+    print('  clear|del[ete] <id>|all - clears LOL of id <id>.');
+    print('      If \'all\' is unspecified instead, will clear all.');
+    print('  diff <id1> <id2> [<dump options>]');
+    print('      - prints the diff between LOLs id1 and id2.');
+    print('      - also see <dump options> below.');
+    print('  getid <address> - gets the obj id for the specified address if available.');
+    print('      The address must be in hex form prefixed with 0x.');
+    print('  inf[o] [<N>] - lists summary info of all LOL lists.');
+    print('      If N is specified, will print N items at a time.');
+    print('  [l[ist]] <id> [<dump options>]');
+    print('      - prints the listing of objects in LOL id.');
+    print('      - also see <dump options> below.');
+    print('  reset - clears all LOL lists.');
+    print('  ret[ainers] <id> [<dump options>]');
+    print('      - prints the list of retainers of obj id.');
+    print('      - also see <dump options> below.');
+    print('  path <id1> <id2> - prints the retaining path from obj id1 to id2.');
+    print('      If only one id is specified, will print the path from');
+    print('      roots to the specified object if available.');
+    print('  print <id> - prints the obj for the specified obj id if available.');
+    print('');
+    print('  <dump options> includes:');
+    print('     [v[erbose]] - do verbose dump.');
+    print('     [<N>] - dump N items at a time.  Implies verbose dump.');
+    print('         If unspecified, N will default to '+
+          kDefaultLolLinesToPrintAtATime+'.  Max N is '+
+          kMaxLolLinesToPrintAtATime+'.');
+    print('     [i[ndex] <i>] - start dump from index i.  Implies verbose dump.');
+    print('     [t[ype] <type>] - filter by type.');
+    print('     [sp[ace] <space name>] - filter by heap space where <space name> is one of');
+    print('         { cell, code, lo, map, new, old-data, old-pointer }.');
+    print('');
+    print('  If the verbose option, or an option that implies a verbose dump');
+    print('  is specified, then a verbose dump will requested.  Else, a summary dump');
+    print('  will be requested.');
+    print('');
+  }
+
   print('trace compile');
   // hidden command: trace debug json - toggles tracing of debug json packets
   print('');
@@ -1339,6 +1697,237 @@ function refObjectToString_(protocolPackage, handle) {
 }
 
 
+function decodeLolCaptureResponse(body) {
+  var result;
+  result = 'Captured live object list '+ body.id +
+           ': count '+ body.count + ' size ' + body.size;
+  return result;
+}
+
+
+function decodeLolDeleteResponse(body) {
+  var result;
+  result = 'Deleted live object list '+ body.id;
+  return result;
+}
+
+
+function digitsIn(value) {
+  var digits = 0;
+  if (value === 0) value = 1;
+  while (value >= 1) {
+    digits++;
+    value /= 10;
+  }
+  return digits;
+}
+
+
+function padding(value, max_digits) {
+  var padding_digits = max_digits - digitsIn(value);
+  var padding = '';
+  while (padding_digits > 0) {
+    padding += ' ';
+    padding_digits--;
+  }
+  return padding;
+}
+
+
+function decodeLolInfoResponse(body) {
+  var result;
+  var lists = body.lists;
+  var length = lists.length;
+  var first_index = body.first_index + 1;
+  var has_more = ((first_index + length) <= body.count);
+  result = 'captured live object lists';
+  if (has_more || (first_index != 1)) {
+    result += ' ['+ length +' of '+ body.count +
+              ': starting from '+ first_index +']';
+  }
+  result += ':\n';
+  var max_digits = digitsIn(body.count);
+  var last_count = 0;
+  var last_size = 0;
+  for (var i = 0; i < length; i++) {
+    var entry = lists[i];
+    var count = entry.count;
+    var size = entry.size;
+    var index = first_index + i;
+    result += '  [' + padding(index, max_digits) + index + '] id '+ entry.id +
+              ': count '+ count;
+    if (last_count > 0) {
+      result += '(+' + (count - last_count) + ')';
+    }
+    result += ' size '+ size;
+    if (last_size > 0) {
+      result += '(+' + (size - last_size) + ')';
+    }
+    result += '\n';
+    last_count = count;
+    last_size = size;
+  }
+  result += '  total: '+length+' lists\n';
+  if (has_more) {
+    result += '  -- press <enter> for more --\n';
+  } else {
+    repeat_cmd_line = '';
+  }
+  if (length === 0) result += '  none\n';
+
+  return result;
+}
+
+
+function decodeLolListResponse(body, title) {
+
+  var result;
+  var total_count = body.count;
+  var total_size = body.size;
+  var length;
+  var max_digits;
+  var i;
+  var entry;
+  var index;
+
+  var max_count_digits = digitsIn(total_count);
+  var max_size_digits;
+
+  var summary = body.summary;
+  if (summary) {
+
+    var roots_count = 0;
+    var found_root = body.found_root || 0;
+    var found_weak_root = body.found_weak_root || 0;
+
+    // Print the summary result:
+    result = 'summary of objects:\n';
+    length = summary.length;
+    if (found_root !== 0) {
+      roots_count++;
+    }
+    if (found_weak_root !== 0) {
+      roots_count++;
+    }
+    max_digits = digitsIn(length + roots_count);
+    max_size_digits = digitsIn(total_size);
+
+    index = 1;
+    if (found_root !== 0) {
+      result += '  [' + padding(index, max_digits) + index + '] ' +
+                ' count '+ 1 + padding(0, max_count_digits) +
+                '      '+ padding(0, max_size_digits+1) +
+                ' : <root>\n';
+      index++;
+    }
+    if (found_weak_root !== 0) {
+      result += '  [' + padding(index, max_digits) + index + '] ' +
+                ' count '+ 1 + padding(0, max_count_digits) +
+                '      '+ padding(0, max_size_digits+1) +
+                ' : <weak root>\n';
+      index++;
+    }
+
+    for (i = 0; i < length; i++) {
+      entry = summary[i];
+      var count = entry.count;
+      var size = entry.size;
+      result += '  [' + padding(index, max_digits) + index + '] ' +
+                ' count '+ count + padding(count, max_count_digits) +
+                ' size '+ size + padding(size, max_size_digits) +
+                ' : <' + entry.desc + '>\n';
+      index++;
+    }
+    result += '\n  total count: '+(total_count+roots_count)+'\n';
+    if (body.size) {
+      result += '  total size:  '+body.size+'\n';
+    }
+
+  } else {
+    // Print the full dump result:
+    var first_index = body.first_index + 1;
+    var elements = body.elements;
+    length = elements.length;
+    var has_more = ((first_index + length) <= total_count);
+    result = title;
+    if (has_more || (first_index != 1)) {
+      result += ' ['+ length +' of '+ total_count +
+                ': starting from '+ first_index +']';
+    }
+    result += ':\n';
+    if (length === 0) result += '  none\n';
+    max_digits = digitsIn(length);
+
+    var max_id = 0;
+    var max_size = 0;
+    for (i = 0; i < length; i++) {
+      entry = elements[i];
+      if (entry.id > max_id) max_id = entry.id;
+      if (entry.size > max_size) max_size = entry.size;
+    }
+    var max_id_digits = digitsIn(max_id);
+    max_size_digits = digitsIn(max_size);
+
+    for (i = 0; i < length; i++) {
+      entry = elements[i];
+      index = first_index + i;
+      result += '  ['+ padding(index, max_digits) + index +']';
+      if (entry.id !== 0) {
+        result += ' @' + entry.id + padding(entry.id, max_id_digits) +
+                  ': size ' + entry.size + ', ' +
+                  padding(entry.size, max_size_digits) + entry.desc + '\n';
+      } else {
+        // Must be a root or weak root:
+        result += ' ' + entry.desc + '\n';
+      }
+    }
+    if (has_more) {
+      result += '  -- press <enter> for more --\n';
+    } else {
+      repeat_cmd_line = '';
+    }
+    if (length === 0) result += '  none\n';
+  }
+
+  return result;
+}
+
+
+function decodeLolDiffResponse(body) {
+  var title = 'objects';
+  return decodeLolListResponse(body, title);
+}
+
+
+function decodeLolRetainersResponse(body) {
+  var title = 'retainers for @' + body.id;
+  return decodeLolListResponse(body, title);
+}
+
+
+function decodeLolPathResponse(body) {
+  return body.path;
+}
+
+
+function decodeLolResetResponse(body) {
+  return 'Reset all live object lists.';
+}
+
+
+function decodeLolGetIdResponse(body) {
+  if (body.id == 0) {
+    return 'Address is invalid, or object has been moved or collected';
+  }
+  return 'obj id is @' + body.id;
+}
+
+
+function decodeLolPrintResponse(body) {
+  return body.dump;
+}
+
+
 // Rounds number 'num' to 'length' decimal places.
 function roundNumber(num, length) {
   var factor = Math.pow(10, length);
@@ -1510,6 +2099,7 @@ function DebugResponseDetails(response) {
 
     case 'evaluate':
     case 'lookup':
+    case 'getobj':
       if (last_cmd == 'p' || last_cmd == 'print') {
         result = body.text;
       } else {
@@ -1671,6 +2261,34 @@ function DebugResponseDetails(response) {
       }
       break;
 
+    case 'lol-capture':
+      details.text = decodeLolCaptureResponse(body);
+      break;
+    case 'lol-delete':
+      details.text = decodeLolDeleteResponse(body);
+      break;
+    case 'lol-diff':
+      details.text = decodeLolDiffResponse(body);
+      break;
+    case 'lol-getid':
+      details.text = decodeLolGetIdResponse(body);
+      break;
+    case 'lol-info':
+      details.text = decodeLolInfoResponse(body);
+      break;
+    case 'lol-print':
+      details.text = decodeLolPrintResponse(body);
+      break;
+    case 'lol-reset':
+      details.text = decodeLolResetResponse(body);
      break;
+    case 'lol-retainers':
+      details.text = decodeLolRetainersResponse(body);
+      break;
+    case 'lol-path':
+      details.text = decodeLolPathResponse(body);
+      break;
+
     default:
       details.text =
           'Response for unknown command \'' + response.command() + '\'' +
deps/v8/src/debug-debugger.js | 121 (vendored)
@ -109,6 +109,7 @@ var debugger_flags = {
|
||||
}
|
||||
},
|
||||
};
|
||||
var lol_is_enabled = %HasLOLEnabled();
|
||||
|
||||
|
||||
// Create a new break point object and add it to the list of break points.
|
||||
@ -1391,6 +1392,8 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
|
||||
this.scopeRequest_(request, response);
|
||||
} else if (request.command == 'evaluate') {
|
||||
this.evaluateRequest_(request, response);
|
||||
} else if (lol_is_enabled && request.command == 'getobj') {
|
||||
this.getobjRequest_(request, response);
|
||||
} else if (request.command == 'lookup') {
|
||||
this.lookupRequest_(request, response);
|
||||
} else if (request.command == 'references') {
|
||||
@ -1418,6 +1421,28 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
|
||||
} else if (request.command == 'gc') {
|
||||
this.gcRequest_(request, response);
|
||||
|
||||
// LiveObjectList tools:
|
||||
} else if (lol_is_enabled && request.command == 'lol-capture') {
|
||||
this.lolCaptureRequest_(request, response);
|
||||
+      } else if (lol_is_enabled && request.command == 'lol-delete') {
+        this.lolDeleteRequest_(request, response);
+      } else if (lol_is_enabled && request.command == 'lol-diff') {
+        this.lolDiffRequest_(request, response);
+      } else if (lol_is_enabled && request.command == 'lol-getid') {
+        this.lolGetIdRequest_(request, response);
+      } else if (lol_is_enabled && request.command == 'lol-info') {
+        this.lolInfoRequest_(request, response);
+      } else if (lol_is_enabled && request.command == 'lol-reset') {
+        this.lolResetRequest_(request, response);
+      } else if (lol_is_enabled && request.command == 'lol-retainers') {
+        this.lolRetainersRequest_(request, response);
+      } else if (lol_is_enabled && request.command == 'lol-path') {
+        this.lolPathRequest_(request, response);
+      } else if (lol_is_enabled && request.command == 'lol-print') {
+        this.lolPrintRequest_(request, response);
+      } else if (lol_is_enabled && request.command == 'lol-stats') {
+        this.lolStatsRequest_(request, response);
+
       } else {
         throw new Error('Unknown command "' + request.command + '" in request');
       }
@@ -2011,6 +2036,24 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
 };


+DebugCommandProcessor.prototype.getobjRequest_ = function(request, response) {
+  if (!request.arguments) {
+    return response.failed('Missing arguments');
+  }
+
+  // Pull out arguments.
+  var obj_id = request.arguments.obj_id;
+
+  // Check for legal arguments.
+  if (IS_UNDEFINED(obj_id)) {
+    return response.failed('Argument "obj_id" missing');
+  }
+
+  // Dump the object.
+  response.body = MakeMirror(%GetLOLObj(obj_id));
+};
+
+
 DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
   if (!request.arguments) {
     return response.failed('Missing arguments');
@@ -2341,6 +2384,84 @@ DebugCommandProcessor.prototype.gcRequest_ = function(request, response) {
 };


+DebugCommandProcessor.prototype.lolCaptureRequest_ =
+    function(request, response) {
+  response.body = %CaptureLOL();
+};
+
+
+DebugCommandProcessor.prototype.lolDeleteRequest_ =
+    function(request, response) {
+  var id = request.arguments.id;
+  var result = %DeleteLOL(id);
+  if (result) {
+    response.body = { id: id };
+  } else {
+    response.failed('Failed to delete: live object list ' + id + ' not found.');
+  }
+};
+
+
+DebugCommandProcessor.prototype.lolDiffRequest_ = function(request, response) {
+  var id1 = request.arguments.id1;
+  var id2 = request.arguments.id2;
+  var verbose = request.arguments.verbose;
+  var filter = request.arguments.filter;
+  if (verbose === true) {
+    var start = request.arguments.start;
+    var count = request.arguments.count;
+    response.body = %DumpLOL(id1, id2, start, count, filter);
+  } else {
+    response.body = %SummarizeLOL(id1, id2, filter);
+  }
+};
+
+
+DebugCommandProcessor.prototype.lolGetIdRequest_ = function(request, response) {
+  var address = request.arguments.address;
+  response.body = {};
+  response.body.id = %GetLOLObjId(address);
+};
+
+
+DebugCommandProcessor.prototype.lolInfoRequest_ = function(request, response) {
+  var start = request.arguments.start;
+  var count = request.arguments.count;
+  response.body = %InfoLOL(start, count);
+};
+
+
+DebugCommandProcessor.prototype.lolResetRequest_ = function(request, response) {
+  %ResetLOL();
+};
+
+
+DebugCommandProcessor.prototype.lolRetainersRequest_ =
+    function(request, response) {
+  var id = request.arguments.id;
+  var verbose = request.arguments.verbose;
+  var start = request.arguments.start;
+  var count = request.arguments.count;
+  var filter = request.arguments.filter;
+
+  response.body = %GetLOLObjRetainers(id, Mirror.prototype, verbose,
+                                      start, count, filter);
+};
+
+
+DebugCommandProcessor.prototype.lolPathRequest_ = function(request, response) {
+  var id1 = request.arguments.id1;
+  var id2 = request.arguments.id2;
+  response.body = {};
+  response.body.path = %GetLOLPath(id1, id2, Mirror.prototype);
+};
+
+
+DebugCommandProcessor.prototype.lolPrintRequest_ = function(request, response) {
+  var id = request.arguments.id;
+  response.body = {};
+  response.body.dump = %PrintLOLObj(id);
+};
+
+
 // Check whether the previously processed command caused the VM to become
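A note on the protocol behind the dispatch above: the lol-* handlers are invoked from JSON requests sent to the debugger agent. This standalone sketch (not part of the patch; the seq/type/command/arguments envelope is the usual V8 debugger protocol shape, and the concrete argument values are invented) shows what a 'lol-diff' request looks like on the wire:

#include <cstdio>
#include <string>

int main() {
  // With verbose:false, lolDiffRequest_ above answers with %SummarizeLOL;
  // with verbose:true it also expects 'start' and 'count' arguments.
  std::string request =
      "{\"seq\":117,\"type\":\"request\",\"command\":\"lol-diff\","
      "\"arguments\":{\"id1\":1,\"id2\":2,\"verbose\":false}}";
  std::printf("%s\n", request.c_str());
  return 0;
}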
deps/v8/src/debug.cc | 3 (vendored)
@@ -836,7 +836,8 @@ bool Debug::Load() {
   Handle<String> key = Factory::LookupAsciiSymbol("builtins");
   Handle<GlobalObject> global = Handle<GlobalObject>(context->global());
   RETURN_IF_EMPTY_HANDLE_VALUE(
-      SetProperty(global, key, Handle<Object>(global->builtins()), NONE),
+      SetProperty(global, key, Handle<Object>(global->builtins()),
+                  NONE, kNonStrictMode),
       false);

   // Compile the JavaScript for the debugger in the debugger context.
deps/v8/src/flag-definitions.h | 14 (vendored)
@@ -110,7 +110,6 @@ DEFINE_bool(use_lithium, true, "use lithium code generator")
 DEFINE_bool(use_range, true, "use hydrogen range analysis")
 DEFINE_bool(eliminate_dead_phis, true, "eliminate dead phis")
 DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
-DEFINE_bool(use_peeling, false, "use loop peeling")
 DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
 DEFINE_bool(use_inlining, true, "use function inlining")
 DEFINE_bool(limit_inlining, true, "limit code size growth from inlining")
@@ -135,11 +134,8 @@ DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
 DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
 DEFINE_bool(aggressive_loop_invariant_motion, true,
             "aggressive motion of instructions out of loops")
-#ifdef V8_TARGET_ARCH_X64
-DEFINE_bool(use_osr, false, "use on-stack replacement")
-#else
 DEFINE_bool(use_osr, true, "use on-stack replacement")
-#endif

 DEFINE_bool(trace_osr, false, "trace on-stack replacement")
 DEFINE_int(stress_runs, 0, "number of stress runs")
 DEFINE_bool(optimize_closures, true, "optimize closures")
@@ -270,6 +266,12 @@ DEFINE_bool(use_idle_notification, true,
 // ic.cc
 DEFINE_bool(use_ic, true, "use inline caching")

+#ifdef LIVE_OBJECT_LIST
+// liveobjectlist.cc
+DEFINE_string(lol_workdir, NULL, "path for lol temp files")
+DEFINE_bool(verify_lol, false, "perform debugging verification for lol")
+#endif
+
 // macro-assembler-ia32.cc
 DEFINE_bool(native_code_counters, false,
             "generate extra code for manipulating stats counters")
@@ -358,7 +360,7 @@ DEFINE_bool(remote_debugger, false, "Connect JavaScript debugger to the "
             "debugger agent in another process")
 DEFINE_bool(debugger_agent, false, "Enable debugger agent")
 DEFINE_int(debugger_port, 5858, "Port to use for remote debugging")
-DEFINE_string(map_counters, NULL, "Map counters to a file")
+DEFINE_string(map_counters, "", "Map counters to a file")
 DEFINE_args(js_arguments, JSArguments(),
             "Pass all remaining arguments to the script. Alias for \"--\".")
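A minimal sketch, not V8 source, of what the DEFINE_string/DEFINE_bool declarations above boil down to: each one becomes a mutable global that --lol_workdir=... and --verify_lol toggle at startup. The macro bodies here are deliberate simplifications of the real ones in flag-definitions.h:

#include <cstdio>

#define DEFINE_bool(name, default_value, comment) \
  bool FLAG_##name = default_value;
#define DEFINE_string(name, default_value, comment) \
  const char* FLAG_##name = default_value;

DEFINE_string(lol_workdir, NULL, "path for lol temp files")
DEFINE_bool(verify_lol, false, "perform debugging verification for lol")

int main() {
  if (FLAG_verify_lol) {
    std::printf("verifying, temp files in %s\n",
                FLAG_lol_workdir ? FLAG_lol_workdir : "(cwd)");
  }
  return 0;
}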
deps/v8/src/frame-element.h | 4 (vendored)
@@ -113,6 +113,10 @@ class FrameElement BASE_EMBEDDED {

   static ZoneObjectList* ConstantList();

+  static bool ConstantPoolOverflowed() {
+    return !DataField::is_valid(ConstantList()->length());
+  }
+
   // Clear the constants indirection table.
   static void ClearConstantList() {
     ConstantList()->Clear();
deps/v8/src/full-codegen.cc | 16 (vendored)
@@ -739,25 +739,13 @@ void FullCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
     case Token::SHL:
     case Token::SHR:
     case Token::SAR: {
-      // Figure out if either of the operands is a constant.
-      ConstantOperand constant = ShouldInlineSmiCase(op)
-          ? GetConstantOperand(op, left, right)
-          : kNoConstants;
-
-      // Load only the operands that we need to materialize.
-      if (constant == kNoConstants) {
       // Load both operands.
       VisitForStackValue(left);
       VisitForAccumulatorValue(right);
-      } else if (constant == kRightConstant) {
-        VisitForAccumulatorValue(left);
-      } else {
-        ASSERT(constant == kLeftConstant);
-        VisitForAccumulatorValue(right);
-      }

       SetSourcePosition(expr->position());
       if (ShouldInlineSmiCase(op)) {
-        EmitInlineSmiBinaryOp(expr, op, mode, left, right, constant);
+        EmitInlineSmiBinaryOp(expr, op, mode, left, right);
       } else {
         EmitBinaryOp(op, mode);
       }
deps/v8/src/full-codegen.h | 48 (vendored)
@@ -274,12 +274,6 @@ class FullCodeGenerator: public AstVisitor {
     ForwardBailoutStack* const parent_;
   };

-  enum ConstantOperand {
-    kNoConstants,
-    kLeftConstant,
-    kRightConstant
-  };
-
   // Type of a member function that generates inline code for a native function.
   typedef void (FullCodeGenerator::*InlineFunctionGenerator)
       (ZoneList<Expression*>*);
@@ -298,11 +292,6 @@ class FullCodeGenerator: public AstVisitor {
   // operation.
   bool ShouldInlineSmiCase(Token::Value op);

-  // Compute which (if any) of the operands is a compile-time constant.
-  ConstantOperand GetConstantOperand(Token::Value op,
-                                     Expression* left,
-                                     Expression* right);
-
   // Helper function to convert a pure value into a test context. The value
   // is expected on the stack or the accumulator, depending on the platform.
   // See the platform-specific implementation for details.
@@ -432,6 +421,14 @@ class FullCodeGenerator: public AstVisitor {
                                 Label* done);
   void EmitVariableLoad(Variable* expr);

+  enum ResolveEvalFlag {
+    SKIP_CONTEXT_LOOKUP,
+    PERFORM_CONTEXT_LOOKUP
+  };
+
+  // Expects the arguments and the function already pushed.
+  void EmitResolvePossiblyDirectEval(ResolveEvalFlag flag, int arg_count);
+
   // Platform-specific support for allocating a new closure based on
   // the given function info.
   void EmitNewClosure(Handle<SharedFunctionInfo> info, bool pretenure);
@@ -457,34 +454,7 @@ class FullCodeGenerator: public AstVisitor {
                              Token::Value op,
                              OverwriteMode mode,
                              Expression* left,
-                             Expression* right,
-                             ConstantOperand constant);
-
-  void EmitConstantSmiBinaryOp(Expression* expr,
-                               Token::Value op,
-                               OverwriteMode mode,
-                               bool left_is_constant_smi,
-                               Smi* value);
-
-  void EmitConstantSmiBitOp(Expression* expr,
-                            Token::Value op,
-                            OverwriteMode mode,
-                            Smi* value);
-
-  void EmitConstantSmiShiftOp(Expression* expr,
-                              Token::Value op,
-                              OverwriteMode mode,
-                              Smi* value);
-
-  void EmitConstantSmiAdd(Expression* expr,
-                          OverwriteMode mode,
-                          bool left_is_constant_smi,
-                          Smi* value);
-
-  void EmitConstantSmiSub(Expression* expr,
-                          OverwriteMode mode,
-                          bool left_is_constant_smi,
-                          Smi* value);
+                             Expression* right);

   // Assign to the given expression as if via '='. The right-hand-side value
   // is expected in the accumulator.
deps/v8/src/handles-inl.h | 4 (vendored)
@@ -36,14 +36,14 @@
 namespace v8 {
 namespace internal {

-template<class T>
+template<typename T>
 Handle<T>::Handle(T* obj) {
   ASSERT(!obj->IsFailure());
   location_ = HandleScope::CreateHandle(obj);
 }


-template <class T>
+template <typename T>
 inline T* Handle<T>::operator*() const {
   ASSERT(location_ != NULL);
   ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue);
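An aside, not from the patch: 'class' and 'typename' are interchangeable in a template parameter list, so the hunk above is a pure style cleanup with no semantic effect.

// Both spellings declare the same template.
template <typename T>   // means exactly the same as: template <class T>
T Identity(T value) { return value; }

int main() { return Identity(0); }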
deps/v8/src/handles.cc | 24 (vendored)
@@ -242,17 +242,21 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
 Handle<Object> SetProperty(Handle<JSObject> object,
                            Handle<String> key,
                            Handle<Object> value,
-                           PropertyAttributes attributes) {
-  CALL_HEAP_FUNCTION(object->SetProperty(*key, *value, attributes), Object);
+                           PropertyAttributes attributes,
+                           StrictModeFlag strict) {
+  CALL_HEAP_FUNCTION(object->SetProperty(*key, *value, attributes, strict),
+                     Object);
 }


 Handle<Object> SetProperty(Handle<Object> object,
                            Handle<Object> key,
                            Handle<Object> value,
-                           PropertyAttributes attributes) {
+                           PropertyAttributes attributes,
+                           StrictModeFlag strict) {
   CALL_HEAP_FUNCTION(
-      Runtime::SetObjectProperty(object, key, value, attributes), Object);
+      Runtime::SetObjectProperty(object, key, value, attributes, strict),
+      Object);
 }


@@ -304,10 +308,12 @@ void SetLocalPropertyNoThrow(Handle<JSObject> object,
 Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
                                           Handle<String> key,
                                           Handle<Object> value,
-                                          PropertyAttributes attributes) {
+                                          PropertyAttributes attributes,
+                                          StrictModeFlag strict) {
   CALL_HEAP_FUNCTION(object->SetPropertyWithInterceptor(*key,
                                                         *value,
-                                                        attributes),
+                                                        attributes,
+                                                        strict),
                      Object);
 }

@@ -863,10 +869,12 @@ bool CompileLazyInLoop(Handle<JSFunction> function,
 }


-bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id) {
+bool CompileOptimized(Handle<JSFunction> function,
+                      int osr_ast_id,
+                      ClearExceptionFlag flag) {
   CompilationInfo info(function);
   info.SetOptimizing(osr_ast_id);
-  return CompileLazyHelper(&info, KEEP_EXCEPTION);
+  return CompileLazyHelper(&info, flag);
 }
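A hedged call-site sketch of why the extra StrictModeFlag parameter threaded through SetProperty matters: the same failed store either surfaces an error or silently no-ops depending on the flag. The enum names match the ones used in the hunks (kStrictMode/kNonStrictMode); everything else is a stand-in, not the real v8::internal declarations.

enum StrictModeFlag { kNonStrictMode, kStrictMode };

// Returns false when the store must raise a TypeError.
bool StoreReadOnly(StrictModeFlag strict) {
  bool assignment_failed = true;   // pretend the property is read-only
  if (assignment_failed && strict == kStrictMode) {
    return false;                  // strict mode: surface the failure
  }
  return true;                     // classic mode: ignore the failed store
}

int main() { return StoreReadOnly(kNonStrictMode) ? 0 : 1; }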
deps/v8/src/handles.h | 63 (vendored)
@@ -39,7 +39,7 @@ namespace internal {
 // Handles are only valid within a HandleScope.
 // When a handle is created for an object a cell is allocated in the heap.

-template<class T>
+template<typename T>
 class Handle {
  public:
   INLINE(explicit Handle(T** location)) { location_ = location; }
@@ -112,15 +112,7 @@ class HandleScope {
   }

   ~HandleScope() {
-    current_.next = prev_next_;
-    current_.level--;
-    if (current_.limit != prev_limit_) {
-      current_.limit = prev_limit_;
-      DeleteExtensions();
-    }
-#ifdef DEBUG
-    ZapRange(prev_next_, prev_limit_);
-#endif
+    CloseScope();
   }

   // Counts the number of allocated handles.
@@ -148,6 +140,26 @@ class HandleScope {
   static Address current_limit_address();
   static Address current_level_address();

+  // Closes the HandleScope (invalidating all handles
+  // created in the scope of the HandleScope) and returns
+  // a Handle backed by the parent scope holding the
+  // value of the argument handle.
+  template <typename T>
+  Handle<T> CloseAndEscape(Handle<T> handle_value) {
+    T* value = *handle_value;
+    // Throw away all handles in the current scope.
+    CloseScope();
+    // Allocate one handle in the parent scope.
+    ASSERT(current_.level > 0);
+    Handle<T> result(CreateHandle<T>(value));
+    // Reinitialize the current scope (so that it's ready
+    // to be used or closed again).
+    prev_next_ = current_.next;
+    prev_limit_ = current_.limit;
+    current_.level++;
+    return result;
+  }
+
  private:
   // Prevent heap allocation or illegal handle scopes.
   HandleScope(const HandleScope&);
@@ -155,9 +167,23 @@ class HandleScope {
   void* operator new(size_t size);
   void operator delete(void* size_t);

+  inline void CloseScope() {
+    current_.next = prev_next_;
+    current_.level--;
+    if (current_.limit != prev_limit_) {
+      current_.limit = prev_limit_;
+      DeleteExtensions();
+    }
+#ifdef DEBUG
+    ZapRange(prev_next_, prev_limit_);
+#endif
+  }
+
   static v8::ImplementationUtilities::HandleScopeData current_;
-  Object** const prev_next_;
-  Object** const prev_limit_;
+  // Holds values on entry. The prev_next_ value is never NULL
+  // on_entry, but is set to NULL when this scope is closed.
+  Object** prev_next_;
+  Object** prev_limit_;

   // Extend the handle scope making room for more handles.
   static internal::Object** Extend();
@@ -197,12 +223,14 @@ Handle<String> FlattenGetString(Handle<String> str);
 Handle<Object> SetProperty(Handle<JSObject> object,
                            Handle<String> key,
                            Handle<Object> value,
-                           PropertyAttributes attributes);
+                           PropertyAttributes attributes,
+                           StrictModeFlag strict);

 Handle<Object> SetProperty(Handle<Object> object,
                            Handle<Object> key,
                            Handle<Object> value,
-                           PropertyAttributes attributes);
+                           PropertyAttributes attributes,
+                           StrictModeFlag strict);

 Handle<Object> ForceSetProperty(Handle<JSObject> object,
                                 Handle<Object> key,
@@ -233,7 +261,8 @@ void SetLocalPropertyNoThrow(Handle<JSObject> object,
 Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object,
                                           Handle<String> key,
                                           Handle<Object> value,
-                                          PropertyAttributes attributes);
+                                          PropertyAttributes attributes,
+                                          StrictModeFlag strict);

 Handle<Object> SetElement(Handle<JSObject> object,
                           uint32_t index,
@@ -354,7 +383,9 @@ bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);

 bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);

-bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id);
+bool CompileOptimized(Handle<JSFunction> function,
+                      int osr_ast_id,
+                      ClearExceptionFlag flag);

 class NoHandleAllocation BASE_EMBEDDED {
  public:
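A toy analogue (deliberately not V8 code) of the CloseAndEscape pattern introduced above: a scope frees everything allocated inside it except the one value the caller escapes, which is re-created in the parent scope. The real HandleScope does the same with handle cells instead of heap ints.

#include <cstdio>
#include <vector>

class Scope {
 public:
  explicit Scope(std::vector<int*>* parent) : parent_(parent) {}
  ~Scope() { Clear(); }
  int* New(int v) { owned_.push_back(new int(v)); return owned_.back(); }
  int* CloseAndEscape(int* keep) {
    int* escaped = new int(*keep);  // one allocation in the parent scope
    Clear();                        // throw away all local values
    parent_->push_back(escaped);
    return escaped;
  }
 private:
  void Clear() {
    for (size_t i = 0; i < owned_.size(); ++i) delete owned_[i];
    owned_.clear();
  }
  std::vector<int*>* parent_;
  std::vector<int*> owned_;
};

int main() {
  std::vector<int*> outer;
  Scope scope(&outer);
  scope.New(1);                                    // temporary, reclaimed
  int* result = scope.CloseAndEscape(scope.New(42));
  std::printf("%d\n", *result);                    // 42, now owned by outer
  delete result;
  return 0;
}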
deps/v8/src/heap-profiler.cc | 45 (vendored)
@@ -911,22 +911,27 @@ static JSObjectsCluster HeapObjectAsCluster(HeapObject* object) {
 class CountingRetainersIterator {
  public:
   CountingRetainersIterator(const JSObjectsCluster& child_cluster,
+                            HeapEntriesAllocator* allocator,
                             HeapEntriesMap* map)
-      : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
+      : child_(ClusterAsHeapObject(child_cluster)),
+        allocator_(allocator),
+        map_(map) {
     if (map_->Map(child_) == NULL)
-      map_->Pair(child_, HeapEntriesMap::kHeapEntryPlaceholder);
+      map_->Pair(child_, allocator_, HeapEntriesMap::kHeapEntryPlaceholder);
   }

   void Call(const JSObjectsCluster& cluster,
             const NumberAndSizeInfo& number_and_size) {
     if (map_->Map(ClusterAsHeapObject(cluster)) == NULL)
       map_->Pair(ClusterAsHeapObject(cluster),
+                 allocator_,
                  HeapEntriesMap::kHeapEntryPlaceholder);
     map_->CountReference(ClusterAsHeapObject(cluster), child_);
   }

  private:
   HeapObject* child_;
+  HeapEntriesAllocator* allocator_;
   HeapEntriesMap* map_;
 };

@@ -934,6 +939,7 @@ class CountingRetainersIterator {
 class AllocatingRetainersIterator {
  public:
   AllocatingRetainersIterator(const JSObjectsCluster& child_cluster,
+                              HeapEntriesAllocator*,
                               HeapEntriesMap* map)
       : child_(ClusterAsHeapObject(child_cluster)), map_(map) {
     child_entry_ = map_->Map(child_);
@@ -966,8 +972,9 @@ template<class RetainersIterator>
 class AggregatingRetainerTreeIterator {
  public:
   explicit AggregatingRetainerTreeIterator(ClustersCoarser* coarser,
+                                           HeapEntriesAllocator* allocator,
                                            HeapEntriesMap* map)
-      : coarser_(coarser), map_(map) {
+      : coarser_(coarser), allocator_(allocator), map_(map) {
   }

   void Call(const JSObjectsCluster& cluster, JSObjectsClusterTree* tree) {
@@ -981,25 +988,28 @@ class AggregatingRetainerTreeIterator {
       tree->ForEach(&retainers_aggregator);
       tree_to_iterate = &dest_tree_;
     }
-    RetainersIterator iterator(cluster, map_);
+    RetainersIterator iterator(cluster, allocator_, map_);
     tree_to_iterate->ForEach(&iterator);
   }

  private:
   ClustersCoarser* coarser_;
+  HeapEntriesAllocator* allocator_;
   HeapEntriesMap* map_;
 };


-class AggregatedRetainerTreeAllocator {
+class AggregatedRetainerTreeAllocator : public HeapEntriesAllocator {
  public:
   AggregatedRetainerTreeAllocator(HeapSnapshot* snapshot,
                                   int* root_child_index)
       : snapshot_(snapshot), root_child_index_(root_child_index) {
   }
+  ~AggregatedRetainerTreeAllocator() { }

-  HeapEntry* GetEntry(
-      HeapObject* obj, int children_count, int retainers_count) {
+  HeapEntry* AllocateEntry(
+      HeapThing ptr, int children_count, int retainers_count) {
+    HeapObject* obj = reinterpret_cast<HeapObject*>(ptr);
     JSObjectsCluster cluster = HeapObjectAsCluster(obj);
     const char* name = cluster.GetSpecialCaseName();
     if (name == NULL) {
@@ -1018,12 +1028,13 @@ class AggregatedRetainerTreeAllocator {

 template<class Iterator>
 void AggregatedHeapSnapshotGenerator::IterateRetainers(
-    HeapEntriesMap* entries_map) {
+    HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map) {
   RetainerHeapProfile* p = agg_snapshot_->js_retainer_profile();
   AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_1(
-      p->coarser(), entries_map);
+      p->coarser(), allocator, entries_map);
   p->retainers_tree()->ForEach(&agg_ret_iter_1);
-  AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(NULL, entries_map);
+  AggregatingRetainerTreeIterator<Iterator> agg_ret_iter_2(
+      NULL, allocator, entries_map);
   p->aggregator()->output_tree().ForEach(&agg_ret_iter_2);
 }

@@ -1042,7 +1053,9 @@ void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
   agg_snapshot_->js_cons_profile()->ForEach(&counting_cons_iter);
   histogram_entities_count += counting_cons_iter.entities_count();
   HeapEntriesMap entries_map;
-  IterateRetainers<CountingRetainersIterator>(&entries_map);
+  int root_child_index = 0;
+  AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
+  IterateRetainers<CountingRetainersIterator>(&allocator, &entries_map);
   histogram_entities_count += entries_map.entries_count();
   histogram_children_count += entries_map.total_children_count();
   histogram_retainers_count += entries_map.total_retainers_count();
@@ -1056,10 +1069,7 @@ void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
   snapshot->AllocateEntries(histogram_entities_count,
                             histogram_children_count,
                             histogram_retainers_count);
-  snapshot->AddEntry(HeapSnapshot::kInternalRootObject,
-                     root_children_count,
-                     0);
-  int root_child_index = 0;
+  snapshot->AddRootEntry(root_children_count);
   for (int i = FIRST_NONSTRING_TYPE; i <= kAllStringsType; ++i) {
     if (agg_snapshot_->info()[i].bytes() > 0) {
       AddEntryFromAggregatedSnapshot(snapshot,
@@ -1075,11 +1085,10 @@ void AggregatedHeapSnapshotGenerator::FillHeapSnapshot(HeapSnapshot* snapshot) {
   AllocatingConstructorHeapProfileIterator alloc_cons_iter(
       snapshot, &root_child_index);
   agg_snapshot_->js_cons_profile()->ForEach(&alloc_cons_iter);
-  AggregatedRetainerTreeAllocator allocator(snapshot, &root_child_index);
-  entries_map.UpdateEntries(&allocator);
+  entries_map.AllocateEntries();

   // Fill up references.
-  IterateRetainers<AllocatingRetainersIterator>(&entries_map);
+  IterateRetainers<AllocatingRetainersIterator>(&allocator, &entries_map);

   snapshot->SetDominatorsToSelf();
 }
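Shape sketch of the allocator seam this change threads through the aggregated-snapshot code: iterators no longer pair objects with entries directly but go through a HeapEntriesAllocator, so different snapshot generators can control entry creation. The AllocateEntry/HeapThing names come from the hunk above; this standalone interface and the counting subclass are illustrative only.

#include <cstdio>

typedef void* HeapThing;
struct HeapEntry;  // opaque in this sketch

class HeapEntriesAllocator {
 public:
  virtual ~HeapEntriesAllocator() {}
  virtual HeapEntry* AllocateEntry(HeapThing ptr, int children_count,
                                   int retainers_count) = 0;
};

class CountingAllocator : public HeapEntriesAllocator {
 public:
  CountingAllocator() : calls_(0) {}
  virtual HeapEntry* AllocateEntry(HeapThing, int, int) {
    ++calls_;  // a real allocator would build a snapshot entry here
    return 0;
  }
  int calls_;
};

int main() {
  CountingAllocator allocator;
  allocator.AllocateEntry(0, 0, 0);
  std::printf("%d\n", allocator.calls_);  // 1
  return 0;
}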
deps/v8/src/heap-profiler.h | 4 (vendored)
@@ -340,6 +340,7 @@ class AggregatedHeapSnapshot {


 class HeapEntriesMap;
+class HeapEntriesAllocator;
 class HeapSnapshot;

 class AggregatedHeapSnapshotGenerator {
@@ -354,7 +355,8 @@ class AggregatedHeapSnapshotGenerator {
   void CalculateStringsStats();
   void CollectStats(HeapObject* obj);
   template<class Iterator>
-  void IterateRetainers(HeapEntriesMap* entries_map);
+  void IterateRetainers(
+      HeapEntriesAllocator* allocator, HeapEntriesMap* entries_map);

   AggregatedHeapSnapshot* agg_snapshot_;
 };
deps/v8/src/heap.cc | 15 (vendored)
@@ -844,8 +844,6 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
   ContextSlotCache::Clear();
   DescriptorLookupCache::Clear();

-  RuntimeProfiler::MarkCompactPrologue(is_compacting);
-
   CompilationCache::MarkCompactPrologue();

   CompletelyClearInstanceofCache();
@@ -1056,20 +1054,13 @@ void Heap::Scavenge() {
   // Scavenge object reachable from the global contexts list directly.
   scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));

-  // Scavenge objects reachable from the runtime-profiler sampler
-  // window directly.
-  Object** sampler_window_address = RuntimeProfiler::SamplerWindowAddress();
-  int sampler_window_size = RuntimeProfiler::SamplerWindowSize();
-  scavenge_visitor.VisitPointers(
-      sampler_window_address,
-      sampler_window_address + sampler_window_size);
-
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);

+  LiveObjectList::UpdateReferencesForScavengeGC();
+  RuntimeProfiler::UpdateSamplesAfterScavenge();

   ASSERT(new_space_front == new_space_.top());

@@ -5336,7 +5327,11 @@ void PathTracer::ProcessResults() {
   for (int i = 0; i < object_stack_.length(); i++) {
     if (i > 0) PrintF("\n |\n |\n V\n\n");
     Object* obj = object_stack_[i];
+#ifdef OBJECT_PRINT
     obj->Print();
+#else
     obj->ShortPrint();
+#endif
   }
   PrintF("=====================================\n");
 }
deps/v8/src/hydrogen.cc | 403 (vendored)
@@ -482,128 +482,75 @@ HConstant* HGraph::GetConstantFalse() {
 }


-void HSubgraph::AppendJoin(HBasicBlock* first,
+HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first,
                                        HBasicBlock* second,
                                        int join_id) {
   if (first == NULL) {
-    exit_block_ = second;
+    return second;
   } else if (second == NULL) {
-    exit_block_ = first;
+    return first;
   } else {
     HBasicBlock* join_block = graph_->CreateBasicBlock();
     first->Goto(join_block);
     second->Goto(join_block);
     join_block->SetJoinId(join_id);
-    exit_block_ = join_block;
+    return join_block;
   }
 }


-void HSubgraph::ResolveContinue(IterationStatement* statement,
+HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement,
+                                         HBasicBlock* exit_block,
                                          HBasicBlock* continue_block) {
   if (continue_block != NULL) {
     continue_block->SetJoinId(statement->ContinueId());
   }
-  exit_block_ =
-      JoinBlocks(exit_block(), continue_block, statement->ContinueId());
+  return CreateJoin(exit_block, continue_block, statement->ContinueId());
 }


-HBasicBlock* HSubgraph::JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id) {
-  if (a == NULL) return b;
-  if (b == NULL) return a;
-  HBasicBlock* target = graph_->CreateBasicBlock();
-  a->Goto(target);
-  b->Goto(target);
-  target->SetJoinId(id);
-  return target;
-}
-
-
-void HSubgraph::AppendEndless(IterationStatement* statement,
+HBasicBlock* HGraphBuilder::CreateEndless(IterationStatement* statement,
                                           HBasicBlock* body_entry,
                                           HBasicBlock* body_exit,
                                           HBasicBlock* break_block) {
-  if (exit_block() != NULL) {
-    exit_block()->Goto(body_entry, false);
-  }
-  if (body_exit != NULL) {
-    body_exit->Goto(body_entry, true);
-  }
+  if (body_exit != NULL) body_exit->Goto(body_entry, true);
   if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
-  exit_block_ = break_block;
   body_entry->PostProcessLoopHeader(statement);
+  return break_block;
 }


-void HSubgraph::AppendDoWhile(IterationStatement* statement,
+HBasicBlock* HGraphBuilder::CreateDoWhile(IterationStatement* statement,
                                           HBasicBlock* body_entry,
                                           HBasicBlock* go_back,
                                           HBasicBlock* exit_block,
                                           HBasicBlock* break_block) {
-  if (this->exit_block() != NULL) {
-    this->exit_block()->Goto(body_entry, false);
-  }
-  if (go_back != NULL) {
-    go_back->Goto(body_entry, true);
-  }
+  if (go_back != NULL) go_back->Goto(body_entry, true);
   if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
-  exit_block_ =
-      JoinBlocks(exit_block, break_block, statement->ExitId());
+  HBasicBlock* new_exit =
+      CreateJoin(exit_block, break_block, statement->ExitId());
   body_entry->PostProcessLoopHeader(statement);
+  return new_exit;
 }


-void HSubgraph::AppendWhile(IterationStatement* statement,
-                            HBasicBlock* condition_entry,
-                            HBasicBlock* exit_block,
-                            HBasicBlock* body_exit,
-                            HBasicBlock* break_block,
+HBasicBlock* HGraphBuilder::CreateWhile(IterationStatement* statement,
+                                        HBasicBlock* loop_entry,
-                            HBasicBlock* loop_exit) {
-  if (this->exit_block() != NULL) {
-    this->exit_block()->Goto(condition_entry, false);
-  }
-
-  if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
-  exit_block_ =
-      JoinBlocks(exit_block, break_block, statement->ExitId());
-
-  if (loop_entry != NULL) {
-    if (body_exit != NULL) {
-      body_exit->Goto(loop_entry, true);
-    }
-    loop_entry->SetJoinId(statement->EntryId());
-    exit_block_ = JoinBlocks(exit_block_, loop_exit, statement->ExitId());
-  } else {
-    if (body_exit != NULL) {
-      body_exit->Goto(condition_entry, true);
-    }
-  }
-  condition_entry->PostProcessLoopHeader(statement);
-}
-
-
-void HSubgraph::Append(BreakableStatement* stmt,
-                       HBasicBlock* entry_block,
-                       HBasicBlock* exit_block,
+                                        HBasicBlock* cond_false,
+                                        HBasicBlock* body_exit,
                                         HBasicBlock* break_block) {
-  exit_block_->Goto(entry_block);
-  exit_block_ = exit_block;
-
-  if (stmt != NULL) {
-    entry_block->SetJoinId(stmt->EntryId());
-    if (break_block != NULL) break_block->SetJoinId(stmt->EntryId());
-    exit_block_ = JoinBlocks(exit_block, break_block, stmt->ExitId());
-  }
+  if (break_block != NULL) break_block->SetJoinId(statement->ExitId());
+  HBasicBlock* new_exit =
+      CreateJoin(cond_false, break_block, statement->ExitId());
+  if (body_exit != NULL) body_exit->Goto(loop_entry, true);
+  loop_entry->PostProcessLoopHeader(statement);
+  return new_exit;
 }


-void HSubgraph::FinishExit(HControlInstruction* instruction) {
-  ASSERT(exit_block() != NULL);
-  exit_block_->Finish(instruction);
-  exit_block_->ClearEnvironment();
-  exit_block_ = NULL;
+void HBasicBlock::FinishExit(HControlInstruction* instruction) {
+  Finish(instruction);
+  ClearEnvironment();
 }

@@ -2165,16 +2112,16 @@ HGraph* HGraphBuilder::CreateGraph(CompilationInfo* info) {

     ZoneList<Statement*>* stmts = info->function()->body();
     HSubgraph* body = CreateGotoSubgraph(environment());
     current_block()->Goto(body->entry_block());
     AddToSubgraph(body, stmts);
     if (HasStackOverflow()) return NULL;
-    current_subgraph_->Append(NULL,
-                              body->entry_block(),
-                              body->exit_block(),
-                              NULL);
+    body->entry_block()->SetJoinId(info->function()->id());
+    set_current_block(body->exit_block());

     if (graph()->exit_block() != NULL) {
-      graph_->FinishExit(new HReturn(graph_->GetConstantUndefined()));
+      HReturn* instr = new HReturn(graph()->GetConstantUndefined());
+      graph()->exit_block()->FinishExit(instr);
+      graph()->set_exit_block(NULL);
     }
   }

@@ -2361,28 +2308,29 @@ HSubgraph* HGraphBuilder::CreateBranchSubgraph(HEnvironment* env) {
 }


-HSubgraph* HGraphBuilder::CreateLoopHeaderSubgraph(HEnvironment* env) {
-  HSubgraph* subgraph = new HSubgraph(graph());
-  HBasicBlock* block = graph()->CreateBasicBlock();
-  HEnvironment* new_env = env->CopyAsLoopHeader(block);
-  block->SetInitialEnvironment(new_env);
-  subgraph->Initialize(block);
-  subgraph->entry_block()->AttachLoopInformation();
-  return subgraph;
+HBasicBlock* HGraphBuilder::CreateLoopHeader() {
+  HBasicBlock* header = graph()->CreateBasicBlock();
+  HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
+  header->SetInitialEnvironment(entry_env);
+  header->AttachLoopInformation();
+  return header;
 }


 void HGraphBuilder::VisitBlock(Block* stmt) {
   if (stmt->labels() != NULL) {
     HSubgraph* block_graph = CreateGotoSubgraph(environment());
     current_block()->Goto(block_graph->entry_block());
     block_graph->entry_block()->SetJoinId(stmt->EntryId());
     BreakAndContinueInfo break_info(stmt);
     { BreakAndContinueScope push(&break_info, this);
       ADD_TO_SUBGRAPH(block_graph, stmt->statements());
     }
-    subgraph()->Append(stmt,
-                       block_graph->entry_block(),
-                       block_graph->exit_block(),
-                       break_info.break_block());
+    HBasicBlock* break_block = break_info.break_block();
+    if (break_block != NULL) break_block->SetJoinId(stmt->EntryId());
+    set_current_block(CreateJoin(block_graph->exit_block(),
+                                 break_block,
+                                 stmt->ExitId()));
   } else {
     VisitStatements(stmt->statements());
   }
@@ -2418,9 +2366,9 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) {
     else_graph->entry_block()->SetJoinId(stmt->ElseId());
     ADD_TO_SUBGRAPH(else_graph, stmt->else_statement());

-    current_subgraph_->AppendJoin(then_graph->exit_block(),
+    set_current_block(CreateJoin(then_graph->exit_block(),
                                  else_graph->exit_block(),
-                                 stmt->id());
+                                 stmt->id()));
   }
 }

@@ -2476,7 +2424,8 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
     // Not an inlined return, so an actual one.
     VISIT_FOR_VALUE(stmt->expression());
     HValue* result = environment()->Pop();
-    subgraph()->FinishExit(new HReturn(result));
+    current_block()->FinishExit(new HReturn(result));
+    set_current_block(NULL);
   } else {
     // Return from an inlined function, visit the subexpression in the
     // expression context of the call.
@@ -2685,145 +2634,116 @@ bool HGraph::HasOsrEntryAt(IterationStatement* statement) {
 }


-void HSubgraph::PreProcessOsrEntry(IterationStatement* statement) {
+void HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) {
   if (!graph()->HasOsrEntryAt(statement)) return;

   HBasicBlock* non_osr_entry = graph()->CreateBasicBlock();
   HBasicBlock* osr_entry = graph()->CreateBasicBlock();
   HValue* true_value = graph()->GetConstantTrue();
   HTest* test = new HTest(true_value, non_osr_entry, osr_entry);
-  exit_block()->Finish(test);
+  current_block()->Finish(test);

   HBasicBlock* loop_predecessor = graph()->CreateBasicBlock();
   non_osr_entry->Goto(loop_predecessor);

+  set_current_block(osr_entry);
   int osr_entry_id = statement->OsrEntryId();
   // We want the correct environment at the OsrEntry instruction. Build
   // it explicitly. The expression stack should be empty.
-  int count = osr_entry->last_environment()->length();
-  ASSERT(count == (osr_entry->last_environment()->parameter_count() +
-                   osr_entry->last_environment()->local_count()));
+  int count = environment()->length();
+  ASSERT(count ==
+         (environment()->parameter_count() + environment()->local_count()));
   for (int i = 0; i < count; ++i) {
     HUnknownOSRValue* unknown = new HUnknownOSRValue;
-    osr_entry->AddInstruction(unknown);
-    osr_entry->last_environment()->Bind(i, unknown);
+    AddInstruction(unknown);
+    environment()->Bind(i, unknown);
   }

-  osr_entry->AddSimulate(osr_entry_id);
-  osr_entry->AddInstruction(new HOsrEntry(osr_entry_id));
-  osr_entry->Goto(loop_predecessor);
+  AddSimulate(osr_entry_id);
+  AddInstruction(new HOsrEntry(osr_entry_id));
+  current_block()->Goto(loop_predecessor);
   loop_predecessor->SetJoinId(statement->EntryId());
-  set_exit_block(loop_predecessor);
+  set_current_block(loop_predecessor);
 }


 void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
   ASSERT(current_block() != NULL);
-  subgraph()->PreProcessOsrEntry(stmt);
+  PreProcessOsrEntry(stmt);
+  HBasicBlock* loop_entry = CreateLoopHeader();
+  current_block()->Goto(loop_entry, false);
+  set_current_block(loop_entry);

-  HSubgraph* body_graph = CreateLoopHeaderSubgraph(environment());
   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+    Visit(stmt->body());
+    CHECK_BAILOUT;
   }
-  body_graph->ResolveContinue(stmt, break_info.continue_block());
-
-  if (body_graph->exit_block() == NULL || stmt->cond()->ToBooleanIsTrue()) {
-    subgraph()->AppendEndless(stmt,
-                              body_graph->entry_block(),
-                              body_graph->exit_block(),
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+  HBasicBlock* loop_exit = NULL;
+  if (body_exit == NULL || stmt->cond()->ToBooleanIsTrue()) {
+    loop_exit = CreateEndless(stmt,
+                              loop_entry,
+                              body_exit,
                               break_info.break_block());
   } else {
-    HSubgraph* go_back = CreateEmptySubgraph();
-    HSubgraph* exit = CreateEmptySubgraph();
-    {
-      SubgraphScope scope(this, body_graph);
-      VISIT_FOR_CONTROL(stmt->cond(),
-                        go_back->entry_block(),
-                        exit->entry_block());
-      go_back->entry_block()->SetJoinId(stmt->BackEdgeId());
-      exit->entry_block()->SetJoinId(stmt->ExitId());
-    }
-    subgraph()->AppendDoWhile(stmt,
-                              body_graph->entry_block(),
-                              go_back->exit_block(),
-                              exit->exit_block(),
+    set_current_block(body_exit);
+    HBasicBlock* cond_true = graph()->CreateBasicBlock();
+    HBasicBlock* cond_false = graph()->CreateBasicBlock();
+    VISIT_FOR_CONTROL(stmt->cond(), cond_true, cond_false);
+    cond_true->SetJoinId(stmt->BackEdgeId());
+    cond_false->SetJoinId(stmt->ExitId());
+    loop_exit = CreateDoWhile(stmt,
+                              loop_entry,
+                              cond_true,
+                              cond_false,
                               break_info.break_block());
   }
+  set_current_block(loop_exit);
 }

 void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
   ASSERT(current_block() != NULL);
-  subgraph()->PreProcessOsrEntry(stmt);
+  PreProcessOsrEntry(stmt);
+  HBasicBlock* loop_entry = CreateLoopHeader();
+  current_block()->Goto(loop_entry, false);
+  set_current_block(loop_entry);

-  HSubgraph* cond_graph = NULL;
-  HSubgraph* body_graph = NULL;
-  HSubgraph* exit_graph = NULL;
-
-  // If the condition is constant true, do not generate a condition subgraph.
-  if (stmt->cond()->ToBooleanIsTrue()) {
-    body_graph = CreateLoopHeaderSubgraph(environment());
-  } else {
-    cond_graph = CreateLoopHeaderSubgraph(environment());
-    body_graph = CreateEmptySubgraph();
-    exit_graph = CreateEmptySubgraph();
-    {
-      SubgraphScope scope(this, cond_graph);
-      VISIT_FOR_CONTROL(stmt->cond(),
-                        body_graph->entry_block(),
-                        exit_graph->entry_block());
-      body_graph->entry_block()->SetJoinId(stmt->BodyId());
-      exit_graph->entry_block()->SetJoinId(stmt->ExitId());
-    }
+  // If the condition is constant true, do not generate a branch.
+  HBasicBlock* cond_false = NULL;
+  if (!stmt->cond()->ToBooleanIsTrue()) {
+    HBasicBlock* cond_true = graph()->CreateBasicBlock();
+    cond_false = graph()->CreateBasicBlock();
+    VISIT_FOR_CONTROL(stmt->cond(), cond_true, cond_false);
+    cond_true->SetJoinId(stmt->BodyId());
+    cond_false->SetJoinId(stmt->ExitId());
+    set_current_block(cond_true);
   }

   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+    Visit(stmt->body());
+    CHECK_BAILOUT;
   }
-  body_graph->ResolveContinue(stmt, break_info.continue_block());
-
-  if (cond_graph != NULL) {
-    AppendPeeledWhile(stmt,
-                      cond_graph->entry_block(),
-                      exit_graph->exit_block(),
-                      body_graph->exit_block(),
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+  HBasicBlock* loop_exit = NULL;
+  if (stmt->cond()->ToBooleanIsTrue()) {
+    // TODO(fschneider): Implement peeling for endless loops as well.
+    loop_exit = CreateEndless(stmt,
+                              loop_entry,
+                              body_exit,
                               break_info.break_block());
   } else {
-    // TODO(fschneider): Implement peeling for endless loops as well.
-    subgraph()->AppendEndless(stmt,
-                              body_graph->entry_block(),
-                              body_graph->exit_block(),
+    loop_exit = CreateWhile(stmt,
+                            loop_entry,
+                            cond_false,
+                            body_exit,
                             break_info.break_block());
   }
 }


-void HGraphBuilder::AppendPeeledWhile(IterationStatement* stmt,
-                                      HBasicBlock* condition_entry,
-                                      HBasicBlock* exit_block,
-                                      HBasicBlock* body_exit,
-                                      HBasicBlock* break_block) {
-  HBasicBlock* loop_entry = NULL;
-  HBasicBlock* loop_exit = NULL;
-  if (FLAG_use_peeling && body_exit != NULL && stmt != peeled_statement_) {
-    // Save the last peeled iteration statement to prevent infinite recursion.
-    IterationStatement* outer_peeled_statement = peeled_statement_;
-    peeled_statement_ = stmt;
-    HSubgraph* loop = CreateGotoSubgraph(body_exit->last_environment());
-    ADD_TO_SUBGRAPH(loop, stmt);
-    peeled_statement_ = outer_peeled_statement;
-    loop_entry = loop->entry_block();
-    loop_exit = loop->exit_block();
-  }
-  subgraph()->AppendWhile(stmt,
-                          condition_entry,
-                          exit_block,
-                          body_exit,
-                          break_block,
-                          loop_entry,
-                          loop_exit);
+  set_current_block(loop_exit);
 }

@@ -2834,57 +2754,50 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) {
     CHECK_BAILOUT;
   }
   ASSERT(current_block() != NULL);
-  subgraph()->PreProcessOsrEntry(stmt);
+  PreProcessOsrEntry(stmt);
+  HBasicBlock* loop_entry = CreateLoopHeader();
+  current_block()->Goto(loop_entry, false);
+  set_current_block(loop_entry);

-  HSubgraph* cond_graph = NULL;
-  HSubgraph* body_graph = NULL;
-  HSubgraph* exit_graph = NULL;
+  HBasicBlock* cond_false = NULL;
   if (stmt->cond() != NULL) {
-    cond_graph = CreateLoopHeaderSubgraph(environment());
-    body_graph = CreateEmptySubgraph();
-    exit_graph = CreateEmptySubgraph();
-    {
-      SubgraphScope scope(this, cond_graph);
-      VISIT_FOR_CONTROL(stmt->cond(),
-                        body_graph->entry_block(),
-                        exit_graph->entry_block());
-      body_graph->entry_block()->SetJoinId(stmt->BodyId());
-      exit_graph->entry_block()->SetJoinId(stmt->ExitId());
-    }
-  } else {
-    body_graph = CreateLoopHeaderSubgraph(environment());
+    HBasicBlock* cond_true = graph()->CreateBasicBlock();
+    cond_false = graph()->CreateBasicBlock();
+    VISIT_FOR_CONTROL(stmt->cond(), cond_true, cond_false);
+    cond_true->SetJoinId(stmt->BodyId());
+    cond_false->SetJoinId(stmt->ExitId());
+    set_current_block(cond_true);
   }

   BreakAndContinueInfo break_info(stmt);
   { BreakAndContinueScope push(&break_info, this);
-    ADD_TO_SUBGRAPH(body_graph, stmt->body());
+    Visit(stmt->body());
     CHECK_BAILOUT;
   }
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());

+  if (stmt->next() != NULL && body_exit != NULL) {
+    set_current_block(body_exit);
+    Visit(stmt->next());
+    CHECK_BAILOUT;
+    body_exit = current_block();
+  }

-  HSubgraph* next_graph = NULL;
-  body_graph->ResolveContinue(stmt, break_info.continue_block());
-
-  if (stmt->next() != NULL && body_graph->exit_block() != NULL) {
-    next_graph =
-        CreateGotoSubgraph(body_graph->exit_block()->last_environment());
-    ADD_TO_SUBGRAPH(next_graph, stmt->next());
-    body_graph->Append(NULL,
-                       next_graph->entry_block(),
-                       next_graph->exit_block(),
-                       NULL);
-    next_graph->entry_block()->SetJoinId(stmt->ContinueId());
-  }
-
-  if (cond_graph != NULL) {
-    AppendPeeledWhile(stmt,
-                      cond_graph->entry_block(),
-                      exit_graph->exit_block(),
-                      body_graph->exit_block(),
+  HBasicBlock* loop_exit = NULL;
+  if (stmt->cond() == NULL) {
+    loop_exit = CreateEndless(stmt,
+                              loop_entry,
+                              body_exit,
                               break_info.break_block());
   } else {
-    subgraph()->AppendEndless(stmt,
-                              body_graph->entry_block(),
-                              body_graph->exit_block(),
+    loop_exit = CreateWhile(stmt,
+                            loop_entry,
+                            cond_false,
+                            body_exit,
                             break_info.break_block());
   }
+  set_current_block(loop_exit);
 }


@@ -2937,9 +2850,9 @@ void HGraphBuilder::VisitConditional(Conditional* expr) {
   else_graph->entry_block()->SetJoinId(expr->ElseId());
   ADD_TO_SUBGRAPH(else_graph, expr->else_expression());

-  current_subgraph_->AppendJoin(then_graph->exit_block(),
+  set_current_block(CreateJoin(then_graph->exit_block(),
                                else_graph->exit_block(),
-                               expr->id());
+                               expr->id()));
   ast_context()->ReturnValue(Pop());
 }

@@ -3317,7 +3230,8 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
   HSubgraph* default_graph = CreateBranchSubgraph(environment());
   { SubgraphScope scope(this, default_graph);
     if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
-      default_graph->FinishExit(new HDeoptimize());
+      default_graph->exit_block()->FinishExit(new HDeoptimize());
+      default_graph->set_exit_block(NULL);
     } else {
       HInstruction* instr = BuildStoreNamedGeneric(object, name, value);
       Push(value);
@@ -3604,7 +3518,8 @@ void HGraphBuilder::VisitThrow(Throw* expr) {
   instr->set_position(expr->position());
   AddInstruction(instr);
   AddSimulate(expr->id());
-  current_subgraph_->FinishExit(new HAbnormalExit);
+  current_block()->FinishExit(new HAbnormalExit);
+  set_current_block(NULL);
 }


@@ -3652,7 +3567,8 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
   HSubgraph* default_graph = CreateBranchSubgraph(environment());
   { SubgraphScope scope(this, default_graph);
     if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
-      default_graph->FinishExit(new HDeoptimize());
+      default_graph->exit_block()->FinishExit(new HDeoptimize());
+      default_graph->set_exit_block(NULL);
     } else {
       HInstruction* instr = BuildLoadNamedGeneric(object, expr);
       instr->set_position(expr->position());
@@ -3853,9 +3769,11 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) {
     HInstruction* elements = AddInstruction(new HArgumentsElements);
     result = new HArgumentsLength(elements);
   } else {
     Push(graph()->GetArgumentsObject());
+    VisitForValue(expr->key());
+    if (HasStackOverflow()) return false;
     HValue* key = Pop();
     Drop(1);  // Arguments object.
     HInstruction* elements = AddInstruction(new HArgumentsElements);
     HInstruction* length = AddInstruction(new HArgumentsLength(elements));
     AddInstruction(new HBoundsCheck(key, length));
@@ -4010,7 +3928,8 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
   HSubgraph* default_graph = CreateBranchSubgraph(environment());
   { SubgraphScope scope(this, default_graph);
     if (!needs_generic && FLAG_deoptimize_uncommon_cases) {
-      default_graph->FinishExit(new HDeoptimize());
+      default_graph->exit_block()->FinishExit(new HDeoptimize());
+      default_graph->set_exit_block(NULL);
     } else {
       HContext* context = new HContext;
       AddInstruction(context);
@@ -4091,6 +4010,8 @@ bool HGraphBuilder::TryInline(Call* expr) {
       !Scope::Analyze(&inner_info)) {
     if (Top::has_pending_exception()) {
       SetStackOverflow();
+      // Stop trying to optimize and inline this function.
+      target->shared()->set_optimization_disabled(true);
     }
     return false;
   }
@@ -4730,9 +4651,9 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
     false_graph->exit_block()->last_environment()->Push(
         graph_->GetConstantFalse());

-    current_subgraph_->AppendJoin(true_graph->exit_block(),
+    set_current_block(CreateJoin(true_graph->exit_block(),
                                  false_graph->exit_block(),
-                                 expr->id());
+                                 expr->id()));
     ast_context()->ReturnValue(Pop());
   } else {
     ASSERT(ast_context()->IsEffect());
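A standalone sketch of the join logic that CreateJoin above implements: if either predecessor is unreachable (NULL), the other block simply becomes the join; otherwise a fresh block is created with both as predecessors. Block here is a stand-in type, not V8's HBasicBlock.

#include <cstdio>
#include <vector>

struct Block {
  std::vector<Block*> predecessors;
};

Block* CreateJoin(Block* first, Block* second) {
  if (first == 0) return second;
  if (second == 0) return first;
  Block* join = new Block();
  join->predecessors.push_back(first);   // first->Goto(join)
  join->predecessors.push_back(second);  // second->Goto(join)
  return join;
}

int main() {
  Block a, b;
  Block* join = CreateJoin(&a, &b);
  std::printf("%u\n", (unsigned)join->predecessors.size());  // 2
  delete join;
  return 0;
}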
deps/v8/src/hydrogen.h | 55 (vendored)
@@ -117,6 +117,7 @@ class HBasicBlock: public ZoneObject {
   void SetJoinId(int id);

   void Finish(HControlInstruction* last);
+  void FinishExit(HControlInstruction* instruction);
   void Goto(HBasicBlock* block, bool include_stack_check = false);

   int PredecessorIndexOf(HBasicBlock* predecessor) const;
@@ -206,34 +207,6 @@ class HSubgraph: public ZoneObject {
     exit_block_ = block;
   }

-  void PreProcessOsrEntry(IterationStatement* statement);
-
-  void AppendJoin(HBasicBlock* first, HBasicBlock* second, int join_id);
-  void AppendWhile(IterationStatement* statement,
-                   HBasicBlock* condition_entry,
-                   HBasicBlock* exit_block,
-                   HBasicBlock* body_exit,
-                   HBasicBlock* break_block,
-                   HBasicBlock* loop_entry,
-                   HBasicBlock* loop_exit);
-  void AppendDoWhile(IterationStatement* statement,
-                     HBasicBlock* body_entry,
-                     HBasicBlock* go_back,
-                     HBasicBlock* exit_block,
-                     HBasicBlock* break_block);
-  void AppendEndless(IterationStatement* statement,
-                     HBasicBlock* body_entry,
-                     HBasicBlock* body_exit,
-                     HBasicBlock* break_block);
-  void Append(BreakableStatement* stmt,
-              HBasicBlock* entry_block,
-              HBasicBlock* exit_block,
-              HBasicBlock* break_block);
-  void ResolveContinue(IterationStatement* statement,
-                       HBasicBlock* continue_block);
-  HBasicBlock* JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id);
-
-  void FinishExit(HControlInstruction* instruction);
   void Initialize(HBasicBlock* block) {
     ASSERT(entry_block_ == NULL);
     entry_block_ = block;
@@ -698,11 +671,29 @@ class HGraphBuilder: public AstVisitor {

   void Bailout(const char* reason);

-  void AppendPeeledWhile(IterationStatement* stmt,
-                         HBasicBlock* condition_entry,
-                         HBasicBlock* exit_block,
-                         HBasicBlock* body_exit,
-                         HBasicBlock* break_block);
+  void PreProcessOsrEntry(IterationStatement* statement);
+
+  HBasicBlock* CreateJoin(HBasicBlock* first,
+                          HBasicBlock* second,
+                          int join_id);
+  HBasicBlock* CreateWhile(IterationStatement* statement,
+                           HBasicBlock* loop_entry,
+                           HBasicBlock* cond_false,
+                           HBasicBlock* body_exit,
+                           HBasicBlock* break_block);
+  HBasicBlock* CreateDoWhile(IterationStatement* statement,
+                             HBasicBlock* body_entry,
+                             HBasicBlock* go_back,
+                             HBasicBlock* exit_block,
+                             HBasicBlock* break_block);
+  HBasicBlock* CreateEndless(IterationStatement* statement,
+                             HBasicBlock* body_entry,
+                             HBasicBlock* body_exit,
+                             HBasicBlock* break_block);
+  HBasicBlock* JoinContinue(IterationStatement* statement,
+                            HBasicBlock* exit_block,
+                            HBasicBlock* continue_block);


   void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
   void AddToSubgraph(HSubgraph* graph, Statement* stmt);
@@ -748,7 +739,7 @@ class HGraphBuilder: public AstVisitor {
   HSubgraph* CreateEmptySubgraph();
   HSubgraph* CreateGotoSubgraph(HEnvironment* env);
   HSubgraph* CreateBranchSubgraph(HEnvironment* env);
-  HSubgraph* CreateLoopHeaderSubgraph(HEnvironment* env);
+  HBasicBlock* CreateLoopHeader();
   HSubgraph* CreateInlinedSubgraph(HEnvironment* outer,
                                    Handle<JSFunction> target,
                                    FunctionLiteral* function);
deps/v8/src/ia32/code-stubs-ia32.cc | 5 (vendored)
@@ -3399,7 +3399,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   __ test(edx, Immediate(kSmiTagMask));
   __ j(not_zero, &base_nonsmi);

-  // Optimized version when both exponent and base is a smi.
+  // Optimized version when both exponent and base are smis.
   Label powi;
   __ SmiUntag(edx);
   __ cvtsi2sd(xmm0, Operand(edx));
@@ -3438,7 +3438,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   __ j(not_carry, &no_multiply);
   __ mulsd(xmm1, xmm0);
   __ bind(&no_multiply);
-  __ test(eax, Operand(eax));
   __ mulsd(xmm0, xmm0);
   __ j(not_zero, &while_true);

@@ -3525,7 +3524,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
   __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
   __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
   __ mov(eax, ecx);
-  __ ret(2);
+  __ ret(2 * kPointerSize);

   __ bind(&call_runtime);
   __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
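Why the ret fix above matters: on x86, the immediate operand of ret counts bytes popped off the stack, not argument slots, so returning past two ia32 stack arguments needs 2 * kPointerSize = 8 bytes, not 2. A trivial sketch of the arithmetic (kPointerSize mirrors V8's ia32 value):

#include <cstdio>

const int kPointerSize = 4;  // ia32

int main() {
  std::printf("ret %d\n", 2 * kPointerSize);  // ret 8, not ret 2
  return 0;
}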
39
deps/v8/src/ia32/codegen-ia32.cc
vendored
39
deps/v8/src/ia32/codegen-ia32.cc
vendored
@ -3526,7 +3526,8 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
|
||||
frame_->EmitPush(esi); // The context is the first argument.
|
||||
frame_->EmitPush(Immediate(pairs));
|
||||
frame_->EmitPush(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
|
||||
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
|
||||
frame_->EmitPush(Immediate(Smi::FromInt(strict_mode_flag())));
|
||||
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored.
}

@@ -5259,7 +5260,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
// by initialization.
value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
frame_->Push(Smi::FromInt(strict_mode_flag()));
value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
}
// Storing a variable must keep the (new) value on the expression
// stack. This is necessary for compiling chained assignment
@@ -5360,12 +5362,22 @@ void CodeGenerator::VisitVariableProxy(VariableProxy* node) {

void CodeGenerator::VisitLiteral(Literal* node) {
Comment cmnt(masm_, "[ Literal");
if (frame_->ConstantPoolOverflowed()) {
Result temp = allocator_->Allocate();
ASSERT(temp.is_valid());
if (in_safe_int32_mode()) {
temp.set_untagged_int32(true);
}
__ Set(temp.reg(), Immediate(node->handle()));
frame_->Push(&temp);
} else {
if (in_safe_int32_mode()) {
frame_->PushUntaggedElement(node->handle());
} else {
frame_->Push(node->handle());
}
}
}

void CodeGenerator::PushUnsafeSmi(Handle<Object> value) {
@@ -5608,8 +5620,9 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(property->key());
Load(property->value());
if (property->emit_store()) {
frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes
// Ignore the result.
Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
} else {
frame_->Drop(3);
}
@@ -8300,6 +8313,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
switch (op) {
case Token::SUB: {
__ neg(value.reg());
frame_->Push(&value);
if (node->no_negative_zero()) {
// -MIN_INT is MIN_INT with the overflow flag set.
unsafe_bailout_->Branch(overflow);
@@ -8312,17 +8326,18 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
}
case Token::BIT_NOT: {
__ not_(value.reg());
frame_->Push(&value);
break;
}
case Token::ADD: {
// Unary plus has no effect on int32 values.
frame_->Push(&value);
break;
}
default:
UNREACHABLE();
break;
}
frame_->Push(&value);
} else {
Load(node->expression());
bool can_overwrite = node->expression()->ResultOverwriteAllowed();
@@ -9458,11 +9473,13 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
DeferredReferenceSetKeyedValue(Register value,
Register key,
Register receiver,
Register scratch)
Register scratch,
StrictModeFlag strict_mode)
: value_(value),
key_(key),
receiver_(receiver),
scratch_(scratch) {
scratch_(scratch),
strict_mode_(strict_mode) {
set_comment("[ DeferredReferenceSetKeyedValue");
}

@@ -9476,6 +9493,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
Register receiver_;
Register scratch_;
Label patch_site_;
StrictModeFlag strict_mode_;
};

@@ -9534,7 +9552,9 @@ void DeferredReferenceSetKeyedValue::Generate() {
}

// Call the IC stub.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
(strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// The delta from the start of the map-compare instruction to the
// test instruction. We use masm_-> directly here instead of the
@@ -9896,7 +9916,8 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
new DeferredReferenceSetKeyedValue(result.reg(),
key.reg(),
receiver.reg(),
tmp.reg());
tmp.reg(),
strict_mode_flag());

// Check that the receiver is not a smi.
__ test(receiver.reg(), Immediate(kSmiTagMask));
@@ -9951,7 +9972,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {

deferred->BindExit();
} else {
result = frame()->CallKeyedStoreIC();
result = frame()->CallKeyedStoreIC(strict_mode_flag());
// Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to
// indicate that we have generated an inline version of the
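A pattern worth noting, since it recurs through the rest of this diff: every store IC now exists in a strict and a non-strict flavor, and each call site picks the stub with a two-way conditional on the strict-mode flag. A minimal sketch of that selection, with GetBuiltin standing in for Builtins::builtin (the enum names mirror the V8 identifiers used above; the helper itself is illustrative, not V8 code):

// Sketch only: the stub selection this diff introduces at store-IC call
// sites. BuiltinId and GetBuiltin are hypothetical stand-ins for
// Builtins::builtin and its table; StrictModeFlag mirrors the V8 enum.
enum StrictModeFlag { kNonStrictMode, kStrictMode };
enum BuiltinId { kKeyedStoreIC_Initialize, kKeyedStoreIC_Initialize_Strict };

const void* GetBuiltin(BuiltinId id);  // assumed: looks up the builtins table

const void* KeyedStoreStubFor(StrictModeFlag strict_mode) {
  // One IC kind, two entry stubs: a strict IC must never be patched over a
  // non-strict one (the new ASSERTs below enforce this), so the flag picks
  // the stub family once, up front.
  return GetBuiltin((strict_mode == kStrictMode)
                        ? kKeyedStoreIC_Initialize_Strict
                        : kKeyedStoreIC_Initialize);
}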
378  deps/v8/src/ia32/full-codegen-ia32.cc
@@ -322,23 +322,6 @@ void FullCodeGenerator::EmitReturnSequence() {
}

FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
Token::Value op, Expression* left, Expression* right) {
ASSERT(ShouldInlineSmiCase(op));
if (op == Token::DIV || op == Token::MOD || op == Token::MUL) {
// We never generate inlined constant smi operations for these.
return kNoConstants;
} else if (right->IsSmiLiteral()) {
return kRightConstant;
} else if (left->IsSmiLiteral() && !Token::IsShiftOp(op)) {
// Don't inline shifts with constant left hand side.
return kLeftConstant;
} else {
return kNoConstants;
}
}

void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
}

@@ -548,7 +531,7 @@ void FullCodeGenerator::DoTest(Label* if_true,
__ j(equal, if_true);
__ cmp(result_register(), Factory::false_value());
__ j(equal, if_false);
ASSERT_EQ(0, kSmiTag);
STATIC_ASSERT(kSmiTag == 0);
__ test(result_register(), Operand(result_register()));
__ j(zero, if_false);
__ test(result_register(), Immediate(kSmiTagMask));
@@ -655,6 +638,7 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
ASSERT(variable != NULL); // Must have been resolved.
Slot* slot = variable->AsSlot();
Property* prop = variable->AsProperty();

if (slot != NULL) {
switch (slot->type()) {
case Slot::PARAMETER:
@@ -740,7 +724,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
prop->key()->AsLiteral()->handle()->IsSmi());
__ Set(ecx, Immediate(prop->key()->AsLiteral()->handle()));

Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(is_strict()
? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
}
@@ -757,7 +743,8 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ push(esi); // The context is the first argument.
__ push(Immediate(pairs));
__ push(Immediate(Smi::FromInt(is_eval() ? 1 : 0)));
__ CallRuntime(Runtime::kDeclareGlobals, 3);
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored.
}

@@ -814,7 +801,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
SetSourcePosition(clause->position());
Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT);
EmitCallIC(ic, &patch_site);

__ test(eax, Operand(eax));
__ j(not_equal, &next_test);
__ Drop(1); // Switch value is no longer needed.
@@ -895,7 +881,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ cmp(edx, Factory::empty_descriptor_array());
__ j(equal, &call_runtime);

// Check that there in an enum cache in the non-empty instance
// Check that there is an enum cache in the non-empty instance
// descriptors (edx). This is the case if the next enumeration
// index field does not contain a smi.
__ mov(edx, FieldOperand(edx, DescriptorArray::kEnumerationIndexOffset));
@@ -1380,7 +1366,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForAccumulatorValue(value);
__ mov(ecx, Immediate(key->handle()));
__ mov(edx, Operand(esp, 0));
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1394,7 +1382,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
__ CallRuntime(Runtime::kSetProperty, 3);
__ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
}
@@ -1572,14 +1561,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}

Token::Value op = expr->binary_op();
ConstantOperand constant = ShouldInlineSmiCase(op)
? GetConstantOperand(op, expr->target(), expr->value())
: kNoConstants;
ASSERT(constant == kRightConstant || constant == kNoConstants);
if (constant == kNoConstants) {
__ push(eax); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
}

OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
@@ -1591,8 +1574,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
op,
mode,
expr->target(),
expr->value(),
constant);
expr->value());
} else {
EmitBinaryOp(op, mode);
}
@@ -1640,220 +1622,11 @@ void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
}

void FullCodeGenerator::EmitConstantSmiAdd(Expression* expr,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
NearLabel call_stub, done;
// Optimistically add smi value with unknown object. If result overflows or is
// not a smi then we had either a smi overflow or added a smi with a tagged
// pointer.
__ add(Operand(eax), Immediate(value));
__ j(overflow, &call_stub);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &done);

// Undo the optimistic add operation and call the shared stub.
__ bind(&call_stub);
__ sub(Operand(eax), Immediate(value));
TypeRecordingBinaryOpStub stub(Token::ADD, mode);
if (left_is_constant_smi) {
__ mov(edx, Immediate(value));
} else {
__ mov(edx, eax);
__ mov(eax, Immediate(value));
}
EmitCallIC(stub.GetCode(), &patch_site);

__ bind(&done);
context()->Plug(eax);
}
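The optimistic add the removed code relies on is sound because of smi tagging: a smi is stored as value << 1 with tag bit 0 clear, while heap pointers have the low bit set, so the add can be attempted before anything is known about the operand. A hedged illustration of the invariant (standalone C++, using the GCC/Clang __builtin_add_overflow builtin; the helper is mine, not V8's):

// Sketch only: why the optimistic add in the deleted EmitConstantSmiAdd is
// sound on ia32. Adding a tagged smi constant (low bit 0) to an unknown word
// leaves the tag bit of the other operand unchanged, so afterwards a clear
// tag bit plus no signed overflow proves a valid smi result.
#include <cstdint>

const intptr_t kSmiTagMask = 1;  // matches the mask tested in the emitted code

bool OptimisticSmiAdd(intptr_t tagged, intptr_t smi_constant, intptr_t* out) {
  intptr_t sum;
  if (__builtin_add_overflow(tagged, smi_constant, &sum))
    return false;              // smi overflow: undo and call the shared stub
  if (sum & kSmiTagMask)
    return false;              // operand was a heap pointer: call the stub
  *out = sum;                  // still a validly tagged smi
  return true;
}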

void FullCodeGenerator::EmitConstantSmiSub(Expression* expr,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
NearLabel call_stub, done;
// Optimistically subtract smi value with unknown object. If result overflows
// or is not a smi then we had either a smi overflow or added a smi with a
// tagged pointer.
if (left_is_constant_smi) {
__ mov(ecx, eax);
__ mov(eax, Immediate(value));
__ sub(Operand(eax), ecx);
} else {
__ sub(Operand(eax), Immediate(value));
}
__ j(overflow, &call_stub);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &done);

__ bind(&call_stub);
if (left_is_constant_smi) {
__ mov(edx, Immediate(value));
__ mov(eax, ecx);
} else {
__ add(Operand(eax), Immediate(value)); // Undo the subtraction.
__ mov(edx, eax);
__ mov(eax, Immediate(value));
}
TypeRecordingBinaryOpStub stub(Token::SUB, mode);
EmitCallIC(stub.GetCode(), &patch_site);

__ bind(&done);
context()->Plug(eax);
}

void FullCodeGenerator::EmitConstantSmiShiftOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Smi* value) {
NearLabel call_stub, smi_case, done;
int shift_value = value->value() & 0x1f;

JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case);

// Call stub.
__ bind(&call_stub);
__ mov(edx, eax);
__ mov(eax, Immediate(value));
TypeRecordingBinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);

// Smi case.
__ bind(&smi_case);
switch (op) {
case Token::SHL:
if (shift_value != 0) {
__ mov(edx, eax);
if (shift_value > 1) {
__ shl(edx, shift_value - 1);
}
// Convert int result to smi, checking that it is in int range.
STATIC_ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
__ add(edx, Operand(edx));
__ j(overflow, &call_stub);
__ mov(eax, edx); // Put result back into eax.
}
break;
case Token::SAR:
if (shift_value != 0) {
__ sar(eax, shift_value);
__ and_(eax, ~kSmiTagMask);
}
break;
case Token::SHR:
// SHR must return a positive value. When shifting by 0 or 1 we need to
// check that smi tagging the result will not create a negative value.
if (shift_value < 2) {
__ mov(edx, eax);
__ SmiUntag(edx);
__ shr(edx, shift_value);
__ test(edx, Immediate(0xc0000000));
__ j(not_zero, &call_stub);
__ SmiTag(edx);
__ mov(eax, edx); // Put result back into eax.
} else {
__ SmiUntag(eax);
__ shr(eax, shift_value);
__ SmiTag(eax);
}
break;
default:
UNREACHABLE();
}

__ bind(&done);
context()->Plug(eax);
}
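The SHR special case above deserves a note: a result is only re-taggable as a smi if it fits in 31 non-negative bits, and for shifts of two or more the top two bits are guaranteed clear, so only shift counts 0 and 1 need the 0xc0000000 test. A hedged standalone restatement of that check (the helper is illustrative, not V8 code):

// Sketch only: the check behind the deleted SHR fast path. An ia32 smi
// stores v as v << 1, so a logical-shift result can be re-tagged iff its
// top two bits are clear; otherwise tagging would overflow or go negative.
#include <cstdint>

bool ShrResultFitsSmi(uint32_t untagged, int shift_value) {
  uint32_t shifted = untagged >> shift_value;
  // Mirrors `test edx, 0xc0000000` in the removed code: either of the top
  // two bits set means the value is not representable as a positive smi.
  return (shifted & 0xc0000000u) == 0;
}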

void FullCodeGenerator::EmitConstantSmiBitOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Smi* value) {
NearLabel smi_case, done;

JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(eax, &smi_case);

// The order of the arguments does not matter for bit-ops with a
// constant operand.
__ mov(edx, Immediate(value));
TypeRecordingBinaryOpStub stub(op, mode);
EmitCallIC(stub.GetCode(), &patch_site);
__ jmp(&done);

// Smi case.
__ bind(&smi_case);
switch (op) {
case Token::BIT_OR:
__ or_(Operand(eax), Immediate(value));
break;
case Token::BIT_XOR:
__ xor_(Operand(eax), Immediate(value));
break;
case Token::BIT_AND:
__ and_(Operand(eax), Immediate(value));
break;
default:
UNREACHABLE();
}

__ bind(&done);
context()->Plug(eax);
}

void FullCodeGenerator::EmitConstantSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
bool left_is_constant_smi,
Smi* value) {
switch (op) {
case Token::BIT_OR:
case Token::BIT_XOR:
case Token::BIT_AND:
EmitConstantSmiBitOp(expr, op, mode, value);
break;
case Token::SHL:
case Token::SAR:
case Token::SHR:
ASSERT(!left_is_constant_smi);
EmitConstantSmiShiftOp(expr, op, mode, value);
break;
case Token::ADD:
EmitConstantSmiAdd(expr, mode, left_is_constant_smi, value);
break;
case Token::SUB:
EmitConstantSmiSub(expr, mode, left_is_constant_smi, value);
break;
default:
UNREACHABLE();
}
}

void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Expression* left,
Expression* right,
ConstantOperand constant) {
if (constant == kRightConstant) {
Smi* value = Smi::cast(*right->AsLiteral()->handle());
EmitConstantSmiBinaryOp(expr, op, mode, false, value);
return;
} else if (constant == kLeftConstant) {
Smi* value = Smi::cast(*left->AsLiteral()->handle());
EmitConstantSmiBinaryOp(expr, op, mode, true, value);
return;
}

Expression* right) {
// Do combined smi check of the operands. Left operand is on the
// stack. Right operand is in eax.
NearLabel done, smi_case, stub_call;
@@ -1985,7 +1758,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ mov(edx, eax);
__ pop(eax); // Restore value.
__ mov(ecx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -2006,7 +1781,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ pop(edx);
}
__ pop(eax); // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -2101,7 +1878,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
__ push(eax); // Value.
__ push(esi); // Context.
__ push(Immediate(var->name()));
__ CallRuntime(Runtime::kStoreContextSlot, 3);
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ CallRuntime(Runtime::kStoreContextSlot, 4);
break;
}
}
@@ -2132,7 +1910,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
} else {
__ pop(edx);
}
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);

// If the assignment ends an initialization block, revert to fast case.
@@ -2170,7 +1950,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);

// If the assignment ends an initialization block, revert to fast case.
@@ -2283,6 +2065,27 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
}

void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(esp, arg_count * kPointerSize));
} else {
__ push(Immediate(Factory::undefined_value()));
}

// Push the receiver of the enclosing function.
__ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));

// Push the strict mode flag.
__ push(Immediate(Smi::FromInt(strict_mode_flag())));

__ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
? Runtime::kResolvePossiblyDirectEvalNoLookup
: Runtime::kResolvePossiblyDirectEval, 4);
}
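The new helper packages the resolve-eval protocol: its caller pushes the eval function itself, and the helper then pushes the remaining three of the four runtime-call arguments. A hedged mirror of that stack layout (the struct and Value type are illustrative stand-ins, not V8's runtime-call plumbing):

// Sketch only: the argument order EmitResolvePossiblyDirectEval prepares for
// the 4-argument Runtime::kResolvePossiblyDirectEval[NoLookup] call.
#include <vector>

struct Value;        // assumed tagged-value type
Value* Undefined();  // assumed undefined sentinel

void PushResolveEvalArgs(std::vector<Value*>* stack,
                         Value* eval_function,
                         const std::vector<Value*>& call_args,
                         Value* enclosing_receiver,
                         Value* strict_mode_smi) {
  stack->push_back(eval_function);                  // pushed by the caller
  stack->push_back(call_args.empty() ? Undefined()  // first arg or undefined
                                     : call_args[0]);
  stack->push_back(enclosing_receiver);             // receiver of enclosing fn
  stack->push_back(strict_mode_smi);                // new 4th argument
}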

void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
@@ -2311,21 +2114,30 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}

// Push copy of the function - found below the arguments.
__ push(Operand(esp, (arg_count + 1) * kPointerSize));

// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(esp, arg_count * kPointerSize));
} else {
__ push(Immediate(Factory::undefined_value()));
// If we know that eval can only be shadowed by eval-introduced
// variables we attempt to load the global eval function directly
// in generated code. If we succeed, there is no need to perform a
// context lookup in the runtime system.
Label done;
if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
Label slow;
EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&slow);
// Push the function and resolve eval.
__ push(eax);
EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
__ jmp(&done);
__ bind(&slow);
}

// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(ebp, (2 + scope()->num_parameters()) * kPointerSize));
// Push the strict mode flag.
__ push(Immediate(Smi::FromInt(strict_mode_flag())));
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// Push copy of the function (found below the arguments) and
// resolve eval.
__ push(Operand(esp, (arg_count + 1) * kPointerSize));
EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
if (done.is_linked()) {
__ bind(&done);
}

// The runtime call returns a pair of values in eax (function) and
// edx (receiver). Touch up the stack with the right values.
@@ -2390,7 +2202,9 @@ void FullCodeGenerator::VisitCall(Call* expr) {
Literal* key = prop->key()->AsLiteral();
if (key != NULL && key->handle()->IsSymbol()) {
// Call to a named property, use call IC.
{ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(prop->obj());
}
EmitCallWithIC(expr, key->handle(), RelocInfo::CODE_TARGET);
} else {
// Call to a keyed property.
@@ -3401,7 +3215,6 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {

void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

if (FLAG_debug_code) {
@@ -3417,7 +3230,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {

void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
Label bailout, done, one_char_separator, long_separator,
non_trivial_array, not_size_one_array, loop, loop_condition,
non_trivial_array, not_size_one_array, loop,
loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;

ASSERT(args->length() == 2);
@@ -3459,7 +3272,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {

// If the array has length zero, return the empty string.
__ mov(array_length, FieldOperand(array, JSArray::kLengthOffset));
__ sar(array_length, 1);
__ SmiUntag(array_length);
__ j(not_zero, &non_trivial_array);
__ mov(result_operand, Factory::empty_string());
__ jmp(&done);
@@ -3482,12 +3295,13 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
// Loop condition: while (index < length).
// Live loop registers: index, array_length, string,
// scratch, string_length, elements.
__ jmp(&loop_condition);
__ bind(&loop);
if (FLAG_debug_code) {
__ cmp(index, Operand(array_length));
__ j(greater_equal, &done);

__ mov(string, FieldOperand(elements, index,
__ Assert(less, "No empty arrays here in EmitFastAsciiArrayJoin");
}
__ bind(&loop);
__ mov(string, FieldOperand(elements,
index,
times_pointer_size,
FixedArray::kHeaderSize));
__ test(string, Immediate(kSmiTagMask));
@@ -3502,7 +3316,6 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout);
__ add(Operand(index), Immediate(1));
__ bind(&loop_condition);
__ cmp(index, Operand(array_length));
__ j(less, &loop);

@@ -3531,7 +3344,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(ZoneList<Expression*>* args) {
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ cmp(scratch, ASCII_STRING_TYPE);
__ j(not_equal, &bailout);

// Add (separator length times array_length) - separator length
@@ -3791,6 +3604,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;

// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
@@ -4023,7 +3837,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ mov(ecx, prop->key()->AsLiteral()->handle());
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4038,7 +3854,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(ecx);
__ pop(edx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -4386,6 +4204,22 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {

void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
switch (ic->kind()) {
case Code::LOAD_IC:
__ IncrementCounter(&Counters::named_load_full, 1);
break;
case Code::KEYED_LOAD_IC:
__ IncrementCounter(&Counters::keyed_load_full, 1);
break;
case Code::STORE_IC:
__ IncrementCounter(&Counters::named_store_full, 1);
break;
case Code::KEYED_STORE_IC:
__ IncrementCounter(&Counters::keyed_store_full, 1);
default:
break;
}

__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
27  deps/v8/src/ia32/ic-ia32.cc
@@ -761,7 +761,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
}

void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -801,7 +802,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {

// Slow case: call runtime.
__ bind(&slow);
GenerateRuntimeSetProperty(masm);
GenerateRuntimeSetProperty(masm, strict_mode);

// Check whether the elements is a pixel array.
__ bind(&check_pixel_array);
@@ -1488,7 +1489,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {

void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::ExtraICState extra_ic_state) {
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -1499,7 +1500,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
NOT_IN_LOOP,
MONOMORPHIC,
extra_ic_state);
strict_mode);
StubCache::GenerateProbe(masm, flags, edx, ecx, ebx, no_reg);

// Cache miss: Jump to runtime.
@@ -1617,7 +1618,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
}

void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : name
@@ -1628,14 +1630,17 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
__ push(edx);
__ push(ecx);
__ push(eax);
__ push(ebx);
__ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
__ push(Immediate(Smi::FromInt(strict_mode)));
__ push(ebx); // return address

// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
__ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}

void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode) {
// ----------- S t a t e -------------
// -- eax : value
// -- ecx : key
@@ -1647,10 +1652,12 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
__ push(edx);
__ push(ecx);
__ push(eax);
__ push(ebx);
__ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
__ push(Immediate(Smi::FromInt(strict_mode))); // Strict mode.
__ push(ebx); // return address

// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
__ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
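These hunks show the second half of the plumbing: Runtime::kSetProperty grows from 3 arguments (receiver, name, value) to 5, with the property attributes and the strict-mode flag appended as smis. A hedged mirror of the new argument order (the struct and Value type are illustrative, not V8's actual runtime-call interface):

// Sketch only: the 5-argument Runtime::kSetProperty layout the patch
// establishes; the field order matches the pushes above (receiver in edx,
// key/name in ecx, value in eax, then the two smi-tagged flags).
struct Value;  // assumed tagged-value type

struct SetPropertyArgs {
  Value* receiver;    // edx
  Value* key;         // ecx
  Value* value;       // eax
  int attributes;     // Smi::FromInt(NONE) -- PropertyAttributes
  int strict_mode;    // Smi::FromInt(strict_mode) -- the new 5th argument
};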
20  deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -127,7 +127,7 @@ bool LCodeGen::GenerateRelocPadding() {
int reloc_size = masm()->relocation_writer_size();
while (reloc_size < deoptimization_reloc_size.min_size) {
__ RecordComment(RelocInfo::kFillerCommentString, true);
reloc_size += RelocInfo::kRelocCommentSize;
reloc_size += RelocInfo::kMinRelocCommentSize;
}
return !is_aborted();
}
@@ -588,7 +588,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
Handle<DeoptimizationInputData> data =
Factory::NewDeoptimizationInputData(length, TENURED);

data->SetTranslationByteArray(*translations_.CreateByteArray());
Handle<ByteArray> translations = translations_.CreateByteArray();
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

Handle<FixedArray> literals =
@@ -1912,12 +1913,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ bind(&before_push_delta);
__ mov(temp, Immediate(delta));
__ StoreToSafepointRegisterSlot(temp, temp);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ call(stub.GetCode(), RelocInfo::CODE_TARGET);
ASSERT_EQ(kAdditionalDelta,
masm_->SizeOfCodeGeneratedSince(&before_push_delta));
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
// Put the result value into the eax slot and restore all registers.
__ StoreToSafepointRegisterSlot(eax, eax);
__ PopSafepointRegisters();
@@ -2786,7 +2782,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
ASSERT(ToRegister(instr->value()).is(eax));

__ mov(ecx, instr->name());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}

@@ -2854,7 +2852,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->key()).is(ecx));
ASSERT(ToRegister(instr->value()).is(eax));

Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
11  deps/v8/src/ia32/lithium-ia32.cc
@@ -870,11 +870,19 @@ LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
ASSERT(instr->representation().IsDouble());
ASSERT(instr->left()->representation().IsDouble());
ASSERT(instr->right()->representation().IsDouble());
if (op == Token::MOD) {
LOperand* left = UseFixedDouble(instr->left(), xmm2);
LOperand* right = UseFixedDouble(instr->right(), xmm1);
LArithmeticD* result = new LArithmeticD(op, left, right);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);

} else {
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseRegisterAtStart(instr->right());
LArithmeticD* result = new LArithmeticD(op, left, right);
return DefineSameAsFirst(result);
}
}

LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
@@ -1165,8 +1173,7 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
new LInstanceOfKnownGlobal(
UseFixed(instr->value(), InstanceofStub::left()),
FixedTemp(edi));
MarkAsSaveDoubles(result);
return AssignEnvironment(AssignPointerMap(DefineFixed(result, eax)));
return MarkAsCall(DefineFixed(result, eax), instr);
}
10  deps/v8/src/ia32/stub-cache-ia32.cc
@@ -2552,12 +2552,13 @@ MaybeObject* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
__ push(edx); // receiver
__ push(ecx); // name
__ push(eax); // value
__ push(Immediate(Smi::FromInt(strict_mode_)));
__ push(ebx); // restore return address

// Do tail-call to the runtime system.
ExternalReference store_ic_property =
ExternalReference(IC_Utility(IC::kStoreInterceptorProperty));
__ TailCallExternalReference(store_ic_property, 3, 1);
__ TailCallExternalReference(store_ic_property, 4, 1);

// Handle store cache miss.
__ bind(&miss);
@@ -3712,10 +3713,13 @@ MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
__ push(edx);
__ push(ecx);
__ push(eax);
__ push(ebx);
__ push(Immediate(Smi::FromInt(NONE))); // PropertyAttributes
__ push(Immediate(Smi::FromInt(
Code::ExtractExtraICStateFromFlags(flags) & kStrictMode)));
__ push(ebx); // return address

// Do tail-call to runtime routine.
__ TailCallRuntime(Runtime::kSetProperty, 3, 1);
__ TailCallRuntime(Runtime::kSetProperty, 5, 1);

return GetCode(flags);
}
25  deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -1038,8 +1038,8 @@ Result VirtualFrame::CallStoreIC(Handle<String> name,
StrictModeFlag strict_mode) {
// Value and (if not contextual) receiver are on top of the frame.
// The IC expects name in ecx, value in eax, and receiver in edx.
Handle<Code> ic(Builtins::builtin(strict_mode == kStrictMode
? Builtins::StoreIC_Initialize_Strict
Handle<Code> ic(Builtins::builtin(
(strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));

Result value = Pop();
@@ -1061,7 +1061,7 @@ Result VirtualFrame::CallStoreIC(Handle<String> name,
}

Result VirtualFrame::CallKeyedStoreIC() {
Result VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
// Value, key, and receiver are on the top of the frame. The IC
// expects value in eax, key in ecx, and receiver in edx.
Result value = Pop();
@@ -1105,7 +1105,9 @@ Result VirtualFrame::CallKeyedStoreIC() {
receiver.Unuse();
}

Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
(strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
}

@@ -1306,6 +1308,7 @@ void VirtualFrame::EmitPush(Immediate immediate, TypeInfo info) {

void VirtualFrame::PushUntaggedElement(Handle<Object> value) {
ASSERT(!ConstantPoolOverflowed());
elements_.Add(FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED));
elements_[element_count() - 1].set_untagged_int32(true);
}
@@ -1336,6 +1339,20 @@ void VirtualFrame::Push(Expression* expr) {
}

void VirtualFrame::Push(Handle<Object> value) {
if (ConstantPoolOverflowed()) {
Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid());
__ Set(temp.reg(), Immediate(value));
Push(&temp);
} else {
FrameElement element =
FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
elements_.Add(element);
}
}

#undef __

} } // namespace v8::internal
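The new VirtualFrame::Push(Handle<Object>) degrades gracefully when the frame's constant pool is full: instead of recording the handle as a constant frame element, it materializes the value into a freshly allocated register and pushes that. A hedged standalone restatement of the pattern (the types and helpers are illustrative stand-ins, not V8's virtual-frame machinery):

// Sketch only: the overflow fallback in VirtualFrame::Push above.
#include <vector>

struct Register { int code; };
Register AllocateRegister();                  // assumed: always succeeds here
void Materialize(Register reg, void* value);  // assumed: emits reg = value
void PushRegister(Register reg);              // assumed: register-backed push

struct ConstantPool {
  std::vector<void*> constants;
  bool Overflowed() const { return constants.size() >= 64; }  // illustrative cap

  void Push(void* value) {
    if (Overflowed()) {
      // No room to track another constant symbolically: pin the value in a
      // register now and push the register instead.
      Register temp = AllocateRegister();
      Materialize(temp, value);
      PushRegister(temp);
    } else {
      constants.push_back(value);  // defer materialization, as before
    }
  }
};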
6  deps/v8/src/ia32/virtual-frame-ia32.h
@@ -370,7 +370,7 @@ class VirtualFrame: public ZoneObject {

// Call keyed store IC. Value, key, and receiver are found on top
// of the frame. All three are dropped.
Result CallKeyedStoreIC();
Result CallKeyedStoreIC(StrictModeFlag strict_mode);

// Call call IC. Function name, arguments, and receiver are found on top
// of the frame and dropped by the call. The argument count does not
@@ -419,9 +419,11 @@ class VirtualFrame: public ZoneObject {
void EmitPush(Immediate immediate,
TypeInfo info = TypeInfo::Unknown());

inline bool ConstantPoolOverflowed();

// Push an element on the virtual frame.
void Push(Handle<Object> value);
inline void Push(Register reg, TypeInfo info = TypeInfo::Unknown());
inline void Push(Handle<Object> value);
inline void Push(Smi* value);

void PushUntaggedElement(Handle<Object> value);
9  deps/v8/src/ic-inl.h
@@ -76,6 +76,15 @@ Code* IC::GetTargetAtAddress(Address address) {

void IC::SetTargetAtAddress(Address address, Code* target) {
ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
#ifdef DEBUG
// STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
// ICs as strict mode. The strict-ness of the IC must be preserved.
Code* old_target = GetTargetAtAddress(address);
if (old_target->kind() == Code::STORE_IC ||
old_target->kind() == Code::KEYED_STORE_IC) {
ASSERT(old_target->extra_ic_state() == target->extra_ic_state());
}
#endif
Assembler::set_target_address_at(address, target->instruction_start());
}
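The comment above states the invariant the whole patch is built around: the strict-mode marker lives in a bit of the code object's extra IC state, and IC patching may change the stub but never that bit. The miss handlers later in this diff recover the flag by masking, which a hedged sketch makes explicit (ExtraICState here is just an int-typed stand-in for Code::ExtraICState):

// Sketch only: how the strict-mode bit rides in extra_ic_state.
typedef int ExtraICState;
enum StrictModeFlag { kNonStrictMode = 0, kStrictMode = 1 };

StrictModeFlag ExtractStrictMode(ExtraICState state) {
  // Same masking as the miss handlers in ic.cc below:
  //   static_cast<StrictModeFlag>(extra_ic_state & kStrictMode)
  return static_cast<StrictModeFlag>(state & kStrictMode);
}

bool StrictnessPreserved(ExtraICState old_state, ExtraICState new_state) {
  // What the ASSERT above enforces for store ICs: patching may change the
  // stub, never the strictness it was compiled for.
  return ExtractStrictMode(old_state) == ExtractStrictMode(new_state);
}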
101  deps/v8/src/ic.cc
@@ -343,7 +343,7 @@ void StoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
ClearInlinedVersion(address);
SetTargetAtAddress(address,
target->extra_ic_state() == kStoreICStrict
(target->extra_ic_state() == kStrictMode)
? initialize_stub_strict()
: initialize_stub());
}
@@ -366,7 +366,10 @@ void KeyedStoreIC::RestoreInlinedVersion(Address address) {

void KeyedStoreIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
SetTargetAtAddress(address, initialize_stub());
SetTargetAtAddress(address,
(target->extra_ic_state() == kStrictMode)
? initialize_stub_strict()
: initialize_stub());
}

@@ -804,6 +807,7 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,

HandleScope scope;
Handle<Object> result = GetProperty(object, key);
RETURN_IF_EMPTY_HANDLE(result);

// Make receiver an object if the callee requires it. Strict mode or builtin
// functions do not wrap the receiver, non-strict functions and objects
@@ -1226,7 +1230,8 @@ MaybeObject* KeyedLoadIC::Load(State state,
if (receiver->HasExternalArrayElements()) {
MaybeObject* probe =
StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver,
false);
false,
kNonStrictMode);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
} else if (receiver->HasIndexedInterceptor()) {
@@ -1382,7 +1387,7 @@ static bool LookupForWrite(JSObject* object,

MaybeObject* StoreIC::Store(State state,
Code::ExtraICState extra_ic_state,
StrictModeFlag strict_mode,
Handle<Object> object,
Handle<String> name,
Handle<Object> value) {
@@ -1412,11 +1417,11 @@ MaybeObject* StoreIC::Store(State state,
#ifdef DEBUG
if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n");
#endif
Builtins::Name target = (extra_ic_state == kStoreICStrict)
Builtins::Name target = (strict_mode == kStrictMode)
? Builtins::StoreIC_ArrayLength_Strict
: Builtins::StoreIC_ArrayLength;
set_target(Builtins::builtin(target));
return receiver->SetProperty(*name, *value, NONE);
return receiver->SetProperty(*name, *value, NONE, strict_mode);
}

// Lookup the property locally in the receiver.
@@ -1440,13 +1445,15 @@ MaybeObject* StoreIC::Store(State state,
// Index is an offset from the end of the object.
int offset = map->instance_size() + (index * kPointerSize);
if (PatchInlinedStore(address(), map, offset)) {
set_target(megamorphic_stub());
set_target((strict_mode == kStrictMode)
? megamorphic_stub_strict()
: megamorphic_stub());
#ifdef DEBUG
if (FLAG_trace_ic) {
PrintF("[StoreIC : inline patch %s]\n", *name->ToCString());
}
#endif
return receiver->SetProperty(*name, *value, NONE);
return receiver->SetProperty(*name, *value, NONE, strict_mode);
#ifdef DEBUG

} else {
@@ -1473,19 +1480,24 @@ MaybeObject* StoreIC::Store(State state,

// If no inlined store ic was patched, generate a stub for this
// store.
UpdateCaches(&lookup, state, extra_ic_state, receiver, name, value);
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
} else {
// Strict mode doesn't allow setting non-existent global property.
if (extra_ic_state == kStoreICStrict && IsContextual(object)) {
// Strict mode doesn't allow setting non-existent global property
// or an assignment to a read only property.
if (strict_mode == kStrictMode) {
if (lookup.IsFound() && lookup.IsReadOnly()) {
return TypeError("strict_read_only_property", object, name);
} else if (IsContextual(object)) {
return ReferenceError("not_defined", name);
}
}
}
}

if (receiver->IsJSGlobalProxy()) {
// Generate a generic stub that goes to the runtime when we see a global
// proxy as receiver.
Code* stub = (extra_ic_state == kStoreICStrict)
Code* stub = (strict_mode == kStrictMode)
? global_proxy_stub_strict()
: global_proxy_stub();
if (target() != stub) {
@@ -1497,13 +1509,13 @@ MaybeObject* StoreIC::Store(State state,
}

// Set the property.
return receiver->SetProperty(*name, *value, NONE);
return receiver->SetProperty(*name, *value, NONE, strict_mode);
}

void StoreIC::UpdateCaches(LookupResult* lookup,
State state,
Code::ExtraICState extra_ic_state,
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
@@ -1525,7 +1537,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
switch (type) {
case FIELD: {
maybe_code = StubCache::ComputeStoreField(
*name, *receiver, lookup->GetFieldIndex(), NULL, extra_ic_state);
*name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
break;
}
case MAP_TRANSITION: {
@@ -1535,7 +1547,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
maybe_code = StubCache::ComputeStoreField(
*name, *receiver, index, *transition, extra_ic_state);
*name, *receiver, index, *transition, strict_mode);
break;
}
case NORMAL: {
@@ -1547,10 +1559,10 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
JSGlobalPropertyCell* cell =
JSGlobalPropertyCell::cast(global->GetPropertyCell(lookup));
maybe_code = StubCache::ComputeStoreGlobal(
*name, *global, cell, extra_ic_state);
*name, *global, cell, strict_mode);
} else {
if (lookup->holder() != *receiver) return;
maybe_code = StubCache::ComputeStoreNormal(extra_ic_state);
maybe_code = StubCache::ComputeStoreNormal(strict_mode);
}
break;
}
@@ -1559,13 +1571,13 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
if (v8::ToCData<Address>(callback->setter()) == 0) return;
maybe_code = StubCache::ComputeStoreCallback(
*name, *receiver, callback, extra_ic_state);
*name, *receiver, callback, strict_mode);
break;
}
case INTERCEPTOR: {
ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined());
maybe_code = StubCache::ComputeStoreInterceptor(
*name, *receiver, extra_ic_state);
*name, *receiver, strict_mode);
break;
}
default:
@@ -1582,7 +1594,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
} else if (state == MONOMORPHIC) {
// Only move to megamorphic if the target changes.
if (target() != Code::cast(code)) {
set_target(extra_ic_state == kStoreICStrict
set_target((strict_mode == kStrictMode)
? megamorphic_stub_strict()
: megamorphic_stub());
}
@@ -1598,6 +1610,7 @@ void StoreIC::UpdateCaches(LookupResult* lookup,

MaybeObject* KeyedStoreIC::Store(State state,
StrictModeFlag strict_mode,
Handle<Object> object,
Handle<Object> key,
Handle<Object> value) {
@@ -1629,11 +1642,11 @@ MaybeObject* KeyedStoreIC::Store(State state,

// Update inline cache and stub cache.
if (FLAG_use_ic) {
UpdateCaches(&lookup, state, receiver, name, value);
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
}

// Set the property.
return receiver->SetProperty(*name, *value, NONE);
return receiver->SetProperty(*name, *value, NONE, strict_mode);
}

// Do not use ICs for objects that require access checks (including
@@ -1642,23 +1655,25 @@ MaybeObject* KeyedStoreIC::Store(State state,
ASSERT(!(use_ic && object->IsJSGlobalProxy()));

if (use_ic) {
Code* stub = generic_stub();
Code* stub =
(strict_mode == kStrictMode) ? generic_stub_strict() : generic_stub();
if (state == UNINITIALIZED) {
if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
MaybeObject* probe =
StubCache::ComputeKeyedLoadOrStoreExternalArray(*receiver, true);
StubCache::ComputeKeyedLoadOrStoreExternalArray(
*receiver, true, strict_mode);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
} else if (receiver->HasPixelElements()) {
MaybeObject* probe =
StubCache::ComputeKeyedStorePixelArray(*receiver);
StubCache::ComputeKeyedStorePixelArray(*receiver, strict_mode);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
} else if (key->IsSmi() && receiver->map()->has_fast_elements()) {
MaybeObject* probe =
StubCache::ComputeKeyedStoreSpecialized(*receiver);
StubCache::ComputeKeyedStoreSpecialized(*receiver, strict_mode);
stub = probe->IsFailure() ?
NULL : Code::cast(probe->ToObjectUnchecked());
}
@@ -1668,12 +1683,13 @@ MaybeObject* KeyedStoreIC::Store(State state,
}

// Set the property.
return Runtime::SetObjectProperty(object, key, value, NONE);
return Runtime::SetObjectProperty(object, key, value, NONE, strict_mode);
}

void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
State state,
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value) {
@@ -1700,8 +1716,8 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,

switch (type) {
case FIELD: {
maybe_code = StubCache::ComputeKeyedStoreField(*name, *receiver,
lookup->GetFieldIndex());
maybe_code = StubCache::ComputeKeyedStoreField(
*name, *receiver, lookup->GetFieldIndex(), NULL, strict_mode);
break;
}
case MAP_TRANSITION: {
@@ -1710,8 +1726,8 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
ASSERT(type == MAP_TRANSITION);
Handle<Map> transition(lookup->GetTransitionMap());
int index = transition->PropertyIndexFor(*name);
maybe_code = StubCache::ComputeKeyedStoreField(*name, *receiver,
index, *transition);
maybe_code = StubCache::ComputeKeyedStoreField(
*name, *receiver, index, *transition, strict_mode);
break;
}
// fall through.
@@ -1719,7 +1735,9 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
default: {
// Always rewrite to the generic case so that we do not
// repeatedly try to rewrite.
maybe_code = generic_stub();
maybe_code = (strict_mode == kStrictMode)
? generic_stub_strict()
: generic_stub();
break;
}
}
@@ -1734,7 +1752,9 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
if (state == UNINITIALIZED || state == PREMONOMORPHIC) {
set_target(Code::cast(code));
} else if (state == MONOMORPHIC) {
set_target(megamorphic_stub());
set_target((strict_mode == kStrictMode)
? megamorphic_stub_strict()
: megamorphic_stub());
}

#ifdef DEBUG
@@ -1835,8 +1855,11 @@ MUST_USE_RESULT MaybeObject* StoreIC_Miss(Arguments args) {
StoreIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
return ic.Store(state, extra_ic_state, args.at<Object>(0),
args.at<String>(1), args.at<Object>(2));
return ic.Store(state,
static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
args.at<Object>(0),
args.at<String>(1),
args.at<Object>(2));
}

@@ -1900,7 +1923,11 @@ MUST_USE_RESULT MaybeObject* KeyedStoreIC_Miss(Arguments args) {
ASSERT(args.length() == 3);
KeyedStoreIC ic;
IC::State state = IC::StateFrom(ic.target(), args[0], args[1]);
return ic.Store(state, args.at<Object>(0), args.at<Object>(1),
Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state();
return ic.Store(state,
static_cast<StrictModeFlag>(extra_ic_state & kStrictMode),
args.at<Object>(0),
args.at<Object>(1),
args.at<Object>(2));
}
45  deps/v8/src/ic.h
@@ -398,16 +398,10 @@ class KeyedLoadIC: public IC {

class StoreIC: public IC {
public:

enum StoreICStrictMode {
kStoreICNonStrict = kNonStrictMode,
kStoreICStrict = kStrictMode
};

StoreIC() : IC(NO_EXTRA_FRAME) { ASSERT(target()->is_store_stub()); }

MUST_USE_RESULT MaybeObject* Store(State state,
Code::ExtraICState extra_ic_state,
StrictModeFlag strict_mode,
Handle<Object> object,
Handle<String> name,
Handle<Object> value);
@@ -416,10 +410,11 @@ class StoreIC: public IC {
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateMegamorphic(MacroAssembler* masm,
Code::ExtraICState extra_ic_state);
StrictModeFlag strict_mode);
static void GenerateArrayLength(MacroAssembler* masm);
static void GenerateNormal(MacroAssembler* masm);
static void GenerateGlobalProxy(MacroAssembler* masm);
static void GenerateGlobalProxy(MacroAssembler* masm,
StrictModeFlag strict_mode);

// Clear the use of an inlined version.
static void ClearInlinedVersion(Address address);
@@ -433,11 +428,18 @@ class StoreIC: public IC {
// lookup result.
void UpdateCaches(LookupResult* lookup,
State state,
Code::ExtraICState extra_ic_state,
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value);

void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
ASSERT((code->extra_ic_state() & kStrictMode) ==
(target()->extra_ic_state() & kStrictMode));
IC::set_target(code);
}

// Stub accessors.
static Code* megamorphic_stub() {
return Builtins::builtin(Builtins::StoreIC_Megamorphic);
@@ -473,6 +475,7 @@ class KeyedStoreIC: public IC {
KeyedStoreIC() : IC(NO_EXTRA_FRAME) { }

MUST_USE_RESULT MaybeObject* Store(State state,
StrictModeFlag strict_mode,
Handle<Object> object,
Handle<Object> name,
Handle<Object> value);
@@ -480,8 +483,9 @@ class KeyedStoreIC: public IC {
// Code generators for stub routines. Only called once at startup.
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
static void GenerateMiss(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm);
static void GenerateGeneric(MacroAssembler* masm);
static void GenerateRuntimeSetProperty(MacroAssembler* masm,
StrictModeFlag strict_mode);
static void GenerateGeneric(MacroAssembler* masm, StrictModeFlag strict_mode);

// Clear the inlined version so the IC is always hit.
static void ClearInlinedVersion(Address address);
@@ -493,20 +497,37 @@ class KeyedStoreIC: public IC {
// Update the inline cache.
void UpdateCaches(LookupResult* lookup,
State state,
StrictModeFlag strict_mode,
Handle<JSObject> receiver,
Handle<String> name,
Handle<Object> value);

void set_target(Code* code) {
// Strict mode must be preserved across IC patching.
ASSERT((code->extra_ic_state() & kStrictMode) ==
(target()->extra_ic_state() & kStrictMode));
IC::set_target(code);
}

// Stub accessors.
static Code* initialize_stub() {
return Builtins::builtin(Builtins::KeyedStoreIC_Initialize);
}
static Code* initialize_stub_strict() {
return Builtins::builtin(Builtins::KeyedStoreIC_Initialize_Strict);
}
static Code* megamorphic_stub() {
return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
}
static Code* megamorphic_stub_strict() {
return Builtins::builtin(Builtins::KeyedStoreIC_Generic_Strict);
}
static Code* generic_stub() {
return Builtins::builtin(Builtins::KeyedStoreIC_Generic);
}
static Code* generic_stub_strict() {
return Builtins::builtin(Builtins::KeyedStoreIC_Generic_Strict);
}

static void Clear(Address address, Code* target);
90  deps/v8/src/liveobjectlist-inl.h
@@ -32,5 +32,95 @@

#include "liveobjectlist.h"

namespace v8 {
namespace internal {

#ifdef LIVE_OBJECT_LIST

void LiveObjectList::GCEpilogue() {
if (!NeedLOLProcessing()) return;
GCEpiloguePrivate();
}

void LiveObjectList::GCPrologue() {
if (!NeedLOLProcessing()) return;
#ifdef VERIFY_LOL
if (FLAG_verify_lol) {
Verify();
}
#endif
}

void LiveObjectList::IterateElements(ObjectVisitor* v) {
if (!NeedLOLProcessing()) return;
IterateElementsPrivate(v);
}

void LiveObjectList::ProcessNonLive(HeapObject *obj) {
// Only do work if we have at least one list to process.
if (last()) DoProcessNonLive(obj);
}

void LiveObjectList::UpdateReferencesForScavengeGC() {
if (LiveObjectList::NeedLOLProcessing()) {
UpdateLiveObjectListVisitor update_visitor;
LiveObjectList::IterateElements(&update_visitor);
}
}

LiveObjectList* LiveObjectList::FindLolForId(int id,
LiveObjectList* start_lol) {
if (id != 0) {
LiveObjectList* lol = start_lol;
while (lol != NULL) {
if (lol->id() == id) {
return lol;
}
lol = lol->prev_;
}
}
return NULL;
}

// Iterates the elements in every lol and returns the one that matches the
// specified key. If no matching element is found, then it returns NULL.
template <typename T>
inline LiveObjectList::Element*
LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) {
LiveObjectList *lol = last();
while (lol != NULL) {
Element* elements = lol->elements_;
for (int i = 0; i < lol->obj_count_; i++) {
Element* element = &elements[i];
if (GetValue(element) == key) {
return element;
}
}
lol = lol->prev_;
}
return NULL;
}

inline int LiveObjectList::GetElementId(LiveObjectList::Element* element) {
return element->id_;
}

inline HeapObject*
LiveObjectList::GetElementObj(LiveObjectList::Element* element) {
return element->obj_;
}

#endif // LIVE_OBJECT_LIST

} } // namespace v8::internal

#endif // V8_LIVEOBJECTLIST_INL_H_
|
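FindElementFor pairs a field accessor with a key and walks the captured lists from newest to oldest. GetElementId above has exactly the accessor shape the template expects. A short sketch of the intended call pattern, as it would appear inside the class (both members are private; obj_id is an assumed local):

    // Illustration only: resolve an object id to its tracked element by
    // scanning every captured list with GetElementId as the accessor.
    Element* element = FindElementFor<int>(GetElementId, obj_id);
    if (element != NULL) {
      HeapObject* obj = GetElementObj(element);  // the object being tracked
    }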
deps/v8/src/liveobjectlist.cc | 2476 lines changed (vendored; file diff suppressed because it is too large)
deps/v8/src/liveobjectlist.h | 280 lines changed (vendored)

@@ -40,54 +40,225 @@ namespace internal {

#ifdef LIVE_OBJECT_LIST

#ifdef DEBUG
// The following symbol when defined enables thorough verification of lol data.
// FLAG_verify_lol will also need to be set to true to enable the verification.
#define VERIFY_LOL
#endif

// Temporary stubbed out LiveObjectList implementation.

typedef int LiveObjectType;
class LolFilter;
class LiveObjectSummary;
class DumpWriter;
class SummaryWriter;


// The LiveObjectList is both a mechanism for tracking a live capture of
// objects in the JS heap, as well as the data structure which represents
// each of those captures. Unlike a snapshot, the lol is live. For example,
// if an object in a captured lol dies and is collected by the GC, the lol
// will reflect that the object is no longer available. The term
// LiveObjectList (and lol) is used to describe both the mechanism and the
// data structure depending on context of use.
//
// In captured lols, objects are tracked using their address and an object id.
// The object id is unique. Once assigned to an object, the object id can never
// be assigned to another object. That is, unless all captured lols are
// deleted, which allows the user to start over with a fresh set of lols and
// object ids. The uniqueness of the object ids allows the user to track
// specific objects and inspect their longevity while debugging JS code in
// execution.
//
// The lol comes with utility functions to capture, dump, summarize, and diff
// captured lols, amongst other functionality. This functionality is
// accessible via the v8 debugger interface.
class LiveObjectList {
 public:
  inline static void GCEpilogue() {}
  inline static void GCPrologue() {}
  inline static void IterateElements(ObjectVisitor* v) {}
  inline static void ProcessNonLive(HeapObject *obj) {}
  inline static void UpdateReferencesForScavengeGC() {}
  inline static void GCEpilogue();
  inline static void GCPrologue();
  inline static void IterateElements(ObjectVisitor* v);
  inline static void ProcessNonLive(HeapObject *obj);
  inline static void UpdateReferencesForScavengeGC();

  static MaybeObject* Capture() { return Heap::undefined_value(); }
  static bool Delete(int id) { return false; }
  // Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be
  // compared/diff'ed using Dump(<lol id1>, <lol id2>, ...). This will yield
  // a verbose dump of all the objects in the resultant lists.
  // Similarly, a summarized result of a LOL listing or a diff can be
  // attained using the Summarize(0, <lol id>) and Summarize(<lol id1>,
  // <lol id2>, ...) calls respectively.

  static MaybeObject* Capture();
  static bool Delete(int id);
  static MaybeObject* Dump(int id1,
                           int id2,
                           int start_idx,
                           int dump_limit,
                           Handle<JSObject> filter_obj) {
    return Heap::undefined_value();
  }
  static MaybeObject* Info(int start_idx, int dump_limit) {
    return Heap::undefined_value();
  }
  static MaybeObject* Summarize(int id1,
                                int id2,
                                Handle<JSObject> filter_obj) {
    return Heap::undefined_value();
  }
                           Handle<JSObject> filter_obj);
  static MaybeObject* Info(int start_idx, int dump_limit);
  static MaybeObject* Summarize(int id1, int id2, Handle<JSObject> filter_obj);

  static void Reset() {}
  static Object* GetObj(int obj_id) { return Heap::undefined_value(); }
  static Object* GetObjId(Handle<String> address) {
    return Heap::undefined_value();
  }
  static void Reset();
  static Object* GetObj(int obj_id);
  static int GetObjId(Object* obj);
  static Object* GetObjId(Handle<String> address);
  static MaybeObject* GetObjRetainers(int obj_id,
                                      Handle<JSObject> instance_filter,
                                      bool verbose,
                                      int start,
                                      int count,
                                      Handle<JSObject> filter_obj) {
    return Heap::undefined_value();
  }
                                      Handle<JSObject> filter_obj);

  static Object* GetPath(int obj_id1,
                         int obj_id2,
                         Handle<JSObject> instance_filter) {
    return Heap::undefined_value();
                         Handle<JSObject> instance_filter);
  static Object* PrintObj(int obj_id);

 private:

  struct Element {
    int id_;
    HeapObject* obj_;
  };

  explicit LiveObjectList(LiveObjectList* prev, int capacity);
  ~LiveObjectList();

  static void GCEpiloguePrivate();
  static void IterateElementsPrivate(ObjectVisitor* v);

  static void DoProcessNonLive(HeapObject *obj);

  static int CompareElement(const Element* a, const Element* b);

  static Object* GetPathPrivate(HeapObject* obj1, HeapObject* obj2);

  static int GetRetainers(Handle<HeapObject> target,
                          Handle<JSObject> instance_filter,
                          Handle<FixedArray> retainers_arr,
                          int start,
                          int dump_limit,
                          int* total_count,
                          LolFilter* filter,
                          LiveObjectSummary *summary,
                          JSFunction* arguments_function,
                          Handle<Object> error);

  static MaybeObject* DumpPrivate(DumpWriter* writer,
                                  int start,
                                  int dump_limit,
                                  LolFilter* filter);
  static MaybeObject* SummarizePrivate(SummaryWriter* writer,
                                       LolFilter* filter,
                                       bool is_tracking_roots);

  static bool NeedLOLProcessing() { return (last() != NULL); }
  static void NullifyNonLivePointer(HeapObject **p) {
    // Mask out the low bit that marks this as a heap object. We'll use this
    // cleared bit as an indicator that this pointer needs to be collected.
    //
    // Meanwhile, we still preserve its approximate value so that we don't
    // have to re-sort the elements list all the time.
    //
    // Note: Doing so also makes this HeapObject* look like an SMI. Hence,
    // the GC pointer updater will ignore it when it gets scanned.
    *p = reinterpret_cast<HeapObject*>((*p)->address());
  }

  LiveObjectList* prev() { return prev_; }
  LiveObjectList* next() { return next_; }
  int id() { return id_; }

  static int list_count() { return list_count_; }
  static LiveObjectList* last() { return last_; }

  inline static LiveObjectList* FindLolForId(int id, LiveObjectList* start_lol);
  int TotalObjCount() { return GetTotalObjCountAndSize(NULL); }
  int GetTotalObjCountAndSize(int* size_p);

  bool Add(HeapObject* obj);
  Element* Find(HeapObject* obj);
  static void NullifyMostRecent(HeapObject* obj);
  void Sort();
  static void SortAll();

  static void PurgeDuplicates();  // Only to be called by GCEpilogue.

#ifdef VERIFY_LOL
  static void Verify(bool match_heap_exactly = false);
  static void VerifyNotInFromSpace();
#endif

  // Iterates the elements in every lol and returns the one that matches the
  // specified key. If no matching element is found, then it returns NULL.
  template <typename T>
  inline static LiveObjectList::Element*
      FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key);

  inline static int GetElementId(Element* element);
  inline static HeapObject* GetElementObj(Element* element);

  // Instance fields.
  LiveObjectList* prev_;
  LiveObjectList* next_;
  int id_;
  int capacity_;
  int obj_count_;
  Element *elements_;

  // Statics for managing all the lists.
  static uint32_t next_element_id_;
  static int list_count_;
  static int last_id_;
  static LiveObjectList* first_;
  static LiveObjectList* last_;

  friend class LolIterator;
  friend class LolForwardIterator;
  friend class LolDumpWriter;
  friend class RetainersDumpWriter;
  friend class RetainersSummaryWriter;
  friend class UpdateLiveObjectListVisitor;
};


// Helper class for updating the LiveObjectList HeapObject pointers.
class UpdateLiveObjectListVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) { UpdatePointer(p); }

  void VisitPointers(Object** start, Object** end) {
    // Copy all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

 private:
  // Based on Heap::ScavengeObject(), but only forwards pointers to live
  // new space objects; it does not actually keep them alive.
  void UpdatePointer(Object** p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) return;

    HeapObject* heap_obj = HeapObject::cast(object);
    ASSERT(Heap::InFromSpace(heap_obj));

    // We use the first word (where the map pointer usually is) of a heap
    // object to record the forwarding pointer. A forwarding pointer can
    // point to an old space, the code space, or the to space of the new
    // generation.
    MapWord first_word = heap_obj->map_word();

    // If the first word is a forwarding address, the object has already been
    // copied.
    if (first_word.IsForwardingAddress()) {
      *p = first_word.ToForwardingAddress();
      return;

    // Else, it's a dead object.
    } else {
      LiveObjectList::NullifyNonLivePointer(reinterpret_cast<HeapObject**>(p));
    }
  }
  static Object* PrintObj(int obj_id) { return Heap::undefined_value(); }
};


@@ -96,11 +267,50 @@ class LiveObjectList {

class LiveObjectList {
 public:
  static void GCEpilogue() {}
  static void GCPrologue() {}
  static void IterateElements(ObjectVisitor* v) {}
  static void ProcessNonLive(HeapObject *obj) {}
  static void UpdateReferencesForScavengeGC() {}
  inline static void GCEpilogue() {}
  inline static void GCPrologue() {}
  inline static void IterateElements(ObjectVisitor* v) {}
  inline static void ProcessNonLive(HeapObject* obj) {}
  inline static void UpdateReferencesForScavengeGC() {}

  inline static MaybeObject* Capture() { return Heap::undefined_value(); }
  inline static bool Delete(int id) { return false; }
  inline static MaybeObject* Dump(int id1,
                                  int id2,
                                  int start_idx,
                                  int dump_limit,
                                  Handle<JSObject> filter_obj) {
    return Heap::undefined_value();
  }
  inline static MaybeObject* Info(int start_idx, int dump_limit) {
    return Heap::undefined_value();
  }
  inline static MaybeObject* Summarize(int id1,
                                       int id2,
                                       Handle<JSObject> filter_obj) {
    return Heap::undefined_value();
  }

  inline static void Reset() {}
  inline static Object* GetObj(int obj_id) { return Heap::undefined_value(); }
  inline static Object* GetObjId(Handle<String> address) {
    return Heap::undefined_value();
  }
  inline static MaybeObject* GetObjRetainers(int obj_id,
                                             Handle<JSObject> instance_filter,
                                             bool verbose,
                                             int start,
                                             int count,
                                             Handle<JSObject> filter_obj) {
    return Heap::undefined_value();
  }

  inline static Object* GetPath(int obj_id1,
                                int obj_id2,
                                Handle<JSObject> instance_filter) {
    return Heap::undefined_value();
  }
  inline static Object* PrintObj(int obj_id) { return Heap::undefined_value(); }
};
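NullifyNonLivePointer leans on V8's pointer tagging: heap object pointers carry a set low bit, while small integers (SMIs) have it clear. Clearing the tag makes a dead entry read as an SMI, so the GC's pointer updater skips it, while the remaining bits still approximate the old address and keep the sorted element list usable. A self-contained sketch of the trick (names are illustrative, not from this patch):

    #include <stdint.h>

    // Sketch of the tagging trick, assuming V8's convention that heap object
    // pointers have the low bit set and SMIs have it clear.
    static const uintptr_t kHeapObjectTag = 1;

    uintptr_t NullifyTag(uintptr_t tagged_ptr) {
      // Clearing the tag makes the value look like an SMI to the GC while
      // preserving an approximate address for later duplicate purging.
      return tagged_ptr & ~kHeapObjectTag;
    }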
deps/v8/src/mark-compact.cc | 7 lines changed (vendored)

@@ -1353,6 +1353,9 @@ void MarkCompactCollector::MarkLiveObjects() {

  // Flush code from collected candidates.
  FlushCode::ProcessCandidates();

  // Clean up dead objects from the runtime profiler.
  RuntimeProfiler::RemoveDeadSamples();
}


@@ -1937,6 +1940,9 @@ static void SweepNewSpace(NewSpace* space) {
  // All pointers were updated. Update auxiliary allocation info.
  Heap::IncrementYoungSurvivorsCounter(survivors_size);
  space->set_age_mark(space->top());

  // Update JSFunction pointers from the runtime profiler.
  RuntimeProfiler::UpdateSamplesAfterScavenge();
}


@@ -2535,6 +2541,7 @@ void MarkCompactCollector::UpdatePointers() {
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor;
  RuntimeProfiler::UpdateSamplesAfterCompact(&updating_visitor);
  Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  GlobalHandles::IterateWeakRoots(&updating_visitor);
deps/v8/src/messages.js | 8 lines changed (vendored)

@@ -226,6 +226,10 @@ function FormatMessage(message) {
      strict_reserved_word: ["Use of future reserved word in strict mode"],
      strict_delete: ["Delete of an unqualified identifier in strict mode."],
      strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
      strict_const: ["Use of const in strict mode."],
      strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function."],
      strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
      strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
    };
  }
  var message_type = %MessageGetType(message);

@@ -1059,8 +1063,8 @@ function errorToString() {
  }
}

%FunctionSetName(errorToString, 'toString');
%SetProperty($Error.prototype, 'toString', errorToString, DONT_ENUM);

InstallFunctions($Error.prototype, DONT_ENUM, ['toString', errorToString]);

// Boilerplate for exceptions for stack overflows. Used from
// Top::StackOverflow().
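The message entries above alternate literal fragments with "%0"/"%1" placeholders; FormatMessage stitches them together by substituting the matching format argument for each placeholder. A rough C++ rendering of that expansion, for illustration only (messages.js itself does this in JavaScript):

    #include <cctype>
    #include <string>
    #include <vector>

    // Expand a template like {"Cannot assign to read only property '", "%0",
    // "' of ", "%1"} against its arguments, in the way FormatMessage does.
    std::string ExpandTemplate(const std::vector<std::string>& parts,
                               const std::vector<std::string>& args) {
      std::string out;
      for (size_t i = 0; i < parts.size(); ++i) {
        const std::string& part = parts[i];
        if (part.size() == 2 && part[0] == '%' &&
            std::isdigit(static_cast<unsigned char>(part[1]))) {
          out += args[part[1] - '0'];  // "%0" -> args[0], "%1" -> args[1]
        } else {
          out += part;                 // literal fragment
        }
      }
      return out;
    }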
deps/v8/src/objects-inl.h | 10 lines changed (vendored)

@@ -769,6 +769,10 @@ bool Object::HasSpecificClassOf(String* name) {

MaybeObject* Object::GetElement(uint32_t index) {
  // GetElement can trigger a getter which can cause allocation.
  // This was not always the case. This ASSERT is here to catch
  // leftover incorrect uses.
  ASSERT(Heap::IsAllocationAllowed());
  return GetElementWithReceiver(this, index);
}

@@ -2615,7 +2619,8 @@ Code::Flags Code::ComputeFlags(Kind kind,
  ASSERT(extra_ic_state == kNoExtraICState ||
         (kind == CALL_IC && (ic_state == MONOMORPHIC ||
                              ic_state == MONOMORPHIC_PROTOTYPE_FAILURE)) ||
         (kind == STORE_IC));
         (kind == STORE_IC) ||
         (kind == KEYED_STORE_IC));
  // Compute the bit mask.
  int bits = kind << kFlagsKindShift;
  if (in_loop) bits |= kFlagsICInLoopMask;

@@ -3737,7 +3742,8 @@ MaybeObject* JSObject::SetHiddenPropertiesObject(Object* hidden_obj) {
  ASSERT(!IsJSGlobalProxy());
  return SetPropertyPostInterceptor(Heap::hidden_symbol(),
                                    hidden_obj,
                                    DONT_ENUM);
                                    DONT_ENUM,
                                    kNonStrictMode);
}
deps/v8/src/objects.cc | 59 lines changed (vendored)

@@ -1444,14 +1444,15 @@ MaybeObject* JSObject::AddProperty(String* name,
MaybeObject* JSObject::SetPropertyPostInterceptor(
    String* name,
    Object* value,
    PropertyAttributes attributes) {
    PropertyAttributes attributes,
    StrictModeFlag strict) {
  // Check local property, ignore interceptor.
  LookupResult result;
  LocalLookupRealNamedProperty(name, &result);
  if (result.IsFound()) {
    // An existing property, a map transition or a null descriptor was
    // found. Use set property to handle all these cases.
    return SetProperty(&result, name, value, attributes);
    return SetProperty(&result, name, value, attributes, strict);
  }
  // Add a new real property.
  return AddProperty(name, value, attributes);

@@ -1576,7 +1577,8 @@ MaybeObject* JSObject::ConvertDescriptorToField(String* name,
MaybeObject* JSObject::SetPropertyWithInterceptor(
    String* name,
    Object* value,
    PropertyAttributes attributes) {
    PropertyAttributes attributes,
    StrictModeFlag strict) {
  HandleScope scope;
  Handle<JSObject> this_handle(this);
  Handle<String> name_handle(name);

@@ -1605,7 +1607,8 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(
  MaybeObject* raw_result =
      this_handle->SetPropertyPostInterceptor(*name_handle,
                                              *value_handle,
                                              attributes);
                                              attributes,
                                              strict);
  RETURN_IF_SCHEDULED_EXCEPTION();
  return raw_result;
}

@@ -1613,10 +1616,11 @@ MaybeObject* JSObject::SetPropertyWithInterceptor(

MaybeObject* JSObject::SetProperty(String* name,
                                   Object* value,
                                   PropertyAttributes attributes) {
                                   PropertyAttributes attributes,
                                   StrictModeFlag strict) {
  LookupResult result;
  LocalLookup(name, &result);
  return SetProperty(&result, name, value, attributes);
  return SetProperty(&result, name, value, attributes, strict);
}


@@ -1896,7 +1900,8 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck(LookupResult* result,
MaybeObject* JSObject::SetProperty(LookupResult* result,
                                   String* name,
                                   Object* value,
                                   PropertyAttributes attributes) {
                                   PropertyAttributes attributes,
                                   StrictModeFlag strict) {
  // Make sure that the top context does not change when doing callbacks or
  // interceptor calls.
  AssertNoContextChange ncc;

@@ -1923,7 +1928,8 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
    Object* proto = GetPrototype();
    if (proto->IsNull()) return value;
    ASSERT(proto->IsJSGlobalObject());
    return JSObject::cast(proto)->SetProperty(result, name, value, attributes);
    return JSObject::cast(proto)->SetProperty(
        result, name, value, attributes, strict);
  }

  if (!result->IsProperty() && !IsJSContextExtensionObject()) {

@@ -1942,7 +1948,19 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
    // Neither properties nor transitions found.
    return AddProperty(name, value, attributes);
  }
  if (result->IsReadOnly() && result->IsProperty()) return value;
  if (result->IsReadOnly() && result->IsProperty()) {
    if (strict == kStrictMode) {
      HandleScope scope;
      Handle<String> key(name);
      Handle<Object> holder(this);
      Handle<Object> args[2] = { key, holder };
      return Top::Throw(*Factory::NewTypeError("strict_read_only_property",
                                               HandleVector(args, 2)));

    } else {
      return value;
    }
  }
  // This is a real property that is not read-only, or it is a
  // transition or null descriptor and there are no setters in the prototypes.
  switch (result->type()) {

@@ -1970,7 +1988,7 @@ MaybeObject* JSObject::SetProperty(LookupResult* result,
                                     value,
                                     result->holder());
    case INTERCEPTOR:
      return SetPropertyWithInterceptor(name, value, attributes);
      return SetPropertyWithInterceptor(name, value, attributes, strict);
    case CONSTANT_TRANSITION: {
      // If the same constant function is being added we can simply
      // transition to the target map.

@@ -5476,9 +5494,11 @@ uint32_t JSFunction::SourceHash() {

bool JSFunction::IsInlineable() {
  if (IsBuiltin()) return false;
  SharedFunctionInfo* shared_info = shared();
  // Check that the function has a script associated with it.
  if (!shared()->script()->IsScript()) return false;
  Code* code = shared()->code();
  if (!shared_info->script()->IsScript()) return false;
  if (shared_info->optimization_disabled()) return false;
  Code* code = shared_info->code();
  if (code->kind() == Code::OPTIMIZED_FUNCTION) return true;
  // If we never ran this (unlikely) then let's try to optimize it.
  if (code->kind() != Code::FUNCTION) return true;

@@ -6285,7 +6305,8 @@ void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
      }
      break;
    case STORE_IC:
      if (extra == StoreIC::kStoreICStrict) {
    case KEYED_STORE_IC:
      if (extra == kStrictMode) {
        name = "STRICT";
      }
      break;

@@ -7277,8 +7298,10 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_FLOAT_ELEMENTS: {
      MaybeObject* value = GetExternalElement(index);
      if (!value->ToObjectUnchecked()->IsUndefined()) return value;
      MaybeObject* maybe_value = GetExternalElement(index);
      Object* value;
      if (!maybe_value->ToObject(&value)) return maybe_value;
      if (!value->IsUndefined()) return value;
      break;
    }
    case DICTIONARY_ELEMENTS: {

@@ -7374,8 +7397,10 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
    case EXTERNAL_INT_ELEMENTS:
    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
    case EXTERNAL_FLOAT_ELEMENTS: {
      MaybeObject* value = GetExternalElement(index);
      if (!value->ToObjectUnchecked()->IsUndefined()) return value;
      MaybeObject* maybe_value = GetExternalElement(index);
      Object* value;
      if (!maybe_value->ToObject(&value)) return maybe_value;
      if (!value->IsUndefined()) return value;
      break;
    }
    case DICTIONARY_ELEMENTS: {
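The read-only branch above is the behavioral core of this file's change: in strict mode, a write to a read-only property now raises a TypeError built from the strict_read_only_property template, while non-strict code keeps the old silent no-op. The throw follows V8's usual runtime-exception pattern, restated here as a sketch with explanatory comments (no new functionality beyond the code above):

    // Sketch of the exception-raising pattern used above: build the error
    // object via the Factory, then return Top::Throw()'s failure value so
    // callers see a pending exception instead of a normal result.
    HandleScope scope;
    Handle<String> key(name);      // fills "%0" in the message template
    Handle<Object> holder(this);   // fills "%1" in the message template
    Handle<Object> args[2] = { key, holder };
    return Top::Throw(*Factory::NewTypeError("strict_read_only_property",
                                             HandleVector(args, 2)));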
deps/v8/src/objects.h | 12 lines changed (vendored)

@@ -1361,11 +1361,13 @@ class JSObject: public HeapObject {

  MUST_USE_RESULT MaybeObject* SetProperty(String* key,
                                           Object* value,
                                           PropertyAttributes attributes);
                                           PropertyAttributes attributes,
                                           StrictModeFlag strict);
  MUST_USE_RESULT MaybeObject* SetProperty(LookupResult* result,
                                           String* key,
                                           Object* value,
                                           PropertyAttributes attributes);
                                           PropertyAttributes attributes,
                                           StrictModeFlag strict);
  MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck(
      LookupResult* result,
      String* name,

@@ -1380,11 +1382,13 @@ class JSObject: public HeapObject {
  MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor(
      String* name,
      Object* value,
      PropertyAttributes attributes);
      PropertyAttributes attributes,
      StrictModeFlag strict);
  MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor(
      String* name,
      Object* value,
      PropertyAttributes attributes);
      PropertyAttributes attributes,
      StrictModeFlag strict);
  MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
      String* key,
      Object* value,
deps/v8/src/parser.cc | 72 lines changed (vendored)

@@ -1106,7 +1106,20 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
  }

  Scanner::Location token_loc = scanner().peek_location();
  Statement* stat = ParseStatement(NULL, CHECK_OK);

  Statement* stat;
  if (peek() == Token::FUNCTION) {
    // FunctionDeclaration is only allowed in the context of SourceElements
    // (Ecma 262 5th Edition, clause 14):
    // SourceElement:
    //    Statement
    //    FunctionDeclaration
    // A common language extension is to allow function declarations in place
    // of any statement. This language extension is disabled in strict mode.
    stat = ParseFunctionDeclaration(CHECK_OK);
  } else {
    stat = ParseStatement(NULL, CHECK_OK);
  }

  if (stat == NULL || stat->IsEmpty()) {
    directive_prologue = false;  // End of directive prologue.

@@ -1263,8 +1276,17 @@ Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
      return result;
    }

    case Token::FUNCTION:
    case Token::FUNCTION: {
      // In strict mode, FunctionDeclaration is only allowed in the context
      // of SourceElements.
      if (temp_scope_->StrictMode()) {
        ReportMessageAt(scanner().peek_location(), "strict_function",
                        Vector<const char*>::empty());
        *ok = false;
        return NULL;
      }
      return ParseFunctionDeclaration(ok);
    }

    case Token::NATIVE:
      return ParseNativeDeclaration(ok);

@@ -1515,6 +1537,11 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,
    Consume(Token::VAR);
  } else if (peek() == Token::CONST) {
    Consume(Token::CONST);
    if (temp_scope_->StrictMode()) {
      ReportMessage("strict_const", Vector<const char*>::empty());
      *ok = false;
      return NULL;
    }
    mode = Variable::CONST;
    is_const = true;
  } else {

@@ -1634,34 +1661,49 @@ Block* Parser::ParseVariableDeclarations(bool accept_IN,

    if (top_scope_->is_global_scope()) {
      // Compute the arguments for the runtime call.
      ZoneList<Expression*>* arguments = new ZoneList<Expression*>(2);
      // Be careful not to assign a value to the global variable if
      // we're in a with. The initialization value should not
      // necessarily be stored in the global object in that case,
      // which is why we need to generate a separate assignment node.
      ZoneList<Expression*>* arguments = new ZoneList<Expression*>(3);
      arguments->Add(new Literal(name));  // we have at least 1 parameter
      if (is_const || (value != NULL && !inside_with())) {
        CallRuntime* initialize;

        if (is_const) {
          arguments->Add(value);
          value = NULL;  // zap the value to avoid the unnecessary assignment
        }
        // Construct the call to Runtime::DeclareGlobal{Variable,Const}Locally
        // and add it to the initialization statement block. Note that
        // this function does different things depending on whether we have
        // 1 or 2 parameters.
      CallRuntime* initialize;
      if (is_const) {

        // Construct the call to Runtime_InitializeConstGlobal
        // and add it to the initialization statement block.
        // Note that the function does different things depending on
        // the number of arguments (1 or 2).
        initialize =
            new CallRuntime(
                Factory::InitializeConstGlobal_symbol(),
                Runtime::FunctionForId(Runtime::kInitializeConstGlobal),
                arguments);
      } else {
        // Add strict mode.
        // We may want to pass a singleton to avoid Literal allocations.
        arguments->Add(NewNumberLiteral(
            temp_scope_->StrictMode() ? kStrictMode : kNonStrictMode));

        // Be careful not to assign a value to the global variable if
        // we're in a with. The initialization value should not
        // necessarily be stored in the global object in that case,
        // which is why we need to generate a separate assignment node.
        if (value != NULL && !inside_with()) {
          arguments->Add(value);
          value = NULL;  // zap the value to avoid the unnecessary assignment
        }

        // Construct the call to Runtime_InitializeVarGlobal
        // and add it to the initialization statement block.
        // Note that the function does different things depending on
        // the number of arguments (2 or 3).
        initialize =
            new CallRuntime(
                Factory::InitializeVarGlobal_symbol(),
                Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
                arguments);
      }

      block->AddStatement(new ExpressionStatement(initialize));
    }
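Taken together, the parser changes above disable two extensions inside strict mode code: const declarations (strict_const) and function declarations in statement position, which ES5 only permits at the top level of a program or function body (strict_function). For illustration, a hypothetical table of programs the patched parser now rejects:

    // Illustration only: strict mode programs the patched parser rejects,
    // with the message template each one triggers.
    static const char* kRejectedInStrictMode[] = {
      "'use strict'; const x = 1;",                   // strict_const
      "'use strict'; if (true) { function f() {} }",  // strict_function
    };
    // The same sources parse successfully without the 'use strict' directive.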
deps/v8/src/platform-solaris.cc | 130 lines changed (vendored)

@@ -45,7 +45,7 @@
#include <errno.h>
#include <ieeefp.h>  // finite()
#include <signal.h>  // sigemptyset(), etc
#include <sys/kdi_regs.h>
#include <sys/regset.h>


#undef MAP_TYPE

@@ -612,11 +612,16 @@ static Sampler* active_sampler_ = NULL;
static pthread_t vm_tid_ = 0;


static pthread_t GetThreadID() {
  return pthread_self();
}


static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  USE(info);
  if (signal != SIGPROF) return;
  if (active_sampler_ == NULL || !active_sampler_->IsActive()) return;
  if (vm_tid_ != pthread_self()) return;
  if (vm_tid_ != GetThreadID()) return;

  TickSample sample_obj;
  TickSample* sample = CpuProfiler::TickSampleEvent();

@@ -627,17 +632,10 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
  mcontext_t& mcontext = ucontext->uc_mcontext;
  sample->state = Top::current_vm_state();

#if V8_HOST_ARCH_IA32
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[KDIREG_EIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_ESP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_EBP]);
#elif V8_HOST_ARCH_X64
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RIP]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RSP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[KDIREG_RBP]);
#else
  UNIMPLEMENTED();
#endif
  sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
  sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
  sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);

  active_sampler_->SampleStack(sample);
  active_sampler_->Tick(sample);
}

@@ -645,26 +643,86 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {

class Sampler::PlatformData : public Malloced {
 public:
  PlatformData() {
    signal_handler_installed_ = false;
  enum SleepInterval {
    FULL_INTERVAL,
    HALF_INTERVAL
  };

  explicit PlatformData(Sampler* sampler)
      : sampler_(sampler),
        signal_handler_installed_(false),
        vm_tgid_(getpid()),
        signal_sender_launched_(false) {
  }

  void SignalSender() {
    while (sampler_->IsActive()) {
      if (rate_limiter_.SuspendIfNecessary()) continue;
      if (sampler_->IsProfiling() && RuntimeProfiler::IsEnabled()) {
        SendProfilingSignal();
        Sleep(HALF_INTERVAL);
        RuntimeProfiler::NotifyTick();
        Sleep(HALF_INTERVAL);
      } else {
        if (sampler_->IsProfiling()) SendProfilingSignal();
        if (RuntimeProfiler::IsEnabled()) RuntimeProfiler::NotifyTick();
        Sleep(FULL_INTERVAL);
      }
    }
  }

  void SendProfilingSignal() {
    if (!signal_handler_installed_) return;
    pthread_kill(vm_tid_, SIGPROF);
  }

  void Sleep(SleepInterval full_or_half) {
    // Convert ms to us and subtract 100 us to compensate for delays
    // occurring during signal delivery.
    useconds_t interval = sampler_->interval_ * 1000 - 100;
    if (full_or_half == HALF_INTERVAL) interval /= 2;
    int result = usleep(interval);
#ifdef DEBUG
    if (result != 0 && errno != EINTR) {
      fprintf(stderr,
              "SignalSender usleep error; interval = %u, errno = %d\n",
              interval,
              errno);
      ASSERT(result == 0 || errno == EINTR);
    }
#endif
    USE(result);
  }

  Sampler* sampler_;
  bool signal_handler_installed_;
  struct sigaction old_signal_handler_;
  struct itimerval old_timer_value_;
  int vm_tgid_;
  bool signal_sender_launched_;
  pthread_t signal_sender_thread_;
  RuntimeProfilerRateLimiter rate_limiter_;
};


static void* SenderEntry(void* arg) {
  Sampler::PlatformData* data =
      reinterpret_cast<Sampler::PlatformData*>(arg);
  data->SignalSender();
  return 0;
}


Sampler::Sampler(int interval)
    : interval_(interval),
      profiling_(false),
      active_(false),
      samples_taken_(0) {
  data_ = new PlatformData();
  data_ = new PlatformData(this);
}


Sampler::~Sampler() {
  ASSERT(!data_->signal_sender_launched_);
  delete data_;
}

@@ -672,43 +730,53 @@ Sampler::~Sampler() {
void Sampler::Start() {
  // There can only be one active sampler at a time on POSIX
  // platforms.
  if (active_sampler_ != NULL) return;
  ASSERT(!IsActive());
  vm_tid_ = GetThreadID();

  // Request profiling signals.
  struct sigaction sa;
  sa.sa_sigaction = ProfilerSignalHandler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  if (sigaction(SIGPROF, &sa, &data_->old_signal_handler_) != 0) return;
  data_->signal_handler_installed_ = true;
  sa.sa_flags = SA_RESTART | SA_SIGINFO;
  data_->signal_handler_installed_ =
      sigaction(SIGPROF, &sa, &data_->old_signal_handler_) == 0;

  // Set the itimer to generate a tick for each interval.
  itimerval itimer;
  itimer.it_interval.tv_sec = interval_ / 1000;
  itimer.it_interval.tv_usec = (interval_ % 1000) * 1000;
  itimer.it_value.tv_sec = itimer.it_interval.tv_sec;
  itimer.it_value.tv_usec = itimer.it_interval.tv_usec;
  setitimer(ITIMER_PROF, &itimer, &data_->old_timer_value_);
  // Start a thread that sends the SIGPROF signal to the VM thread.
  // Sending the signal ourselves instead of relying on itimer provides
  // much better accuracy.
  SetActive(true);
  if (pthread_create(
          &data_->signal_sender_thread_, NULL, SenderEntry, data_) == 0) {
    data_->signal_sender_launched_ = true;
  }

  // Set this sampler as the active sampler.
  active_sampler_ = this;
  active_ = true;
}


void Sampler::Stop() {
  SetActive(false);

  // Wait for signal sender termination (it will exit after setting
  // active_ to false).
  if (data_->signal_sender_launched_) {
    Top::WakeUpRuntimeProfilerThreadBeforeShutdown();
    pthread_join(data_->signal_sender_thread_, NULL);
    data_->signal_sender_launched_ = false;
  }

  // Restore the old signal handler.
  if (data_->signal_handler_installed_) {
    setitimer(ITIMER_PROF, &data_->old_timer_value_, NULL);
    sigaction(SIGPROF, &data_->old_signal_handler_, 0);
    data_->signal_handler_installed_ = false;
  }

  // This sampler is no longer the active sampler.
  active_sampler_ = NULL;
  active_ = false;
}


#endif  // ENABLE_LOGGING_AND_PROFILING

} }  // namespace v8::internal
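The rewritten sampler replaces the per-process setitimer() tick with a dedicated sender thread that pthread_kill()s the VM thread at a fixed interval, which both improves timing accuracy and lets the same loop drive RuntimeProfiler::NotifyTick(). A stripped-down, self-contained sketch of that shape (POSIX; the names and the fixed 1 ms interval are illustrative, and shutdown handling is elided):

    #include <pthread.h>
    #include <signal.h>
    #include <unistd.h>

    static pthread_t vm_thread;               // thread being profiled
    static volatile bool sampler_active = true;

    // Dedicated sender loop: one SIGPROF per interval, delivered directly to
    // the VM thread instead of wherever the process timer happens to fire.
    static void* SignalSenderLoop(void*) {
      while (sampler_active) {
        pthread_kill(vm_thread, SIGPROF);  // the signal handler takes the sample
        usleep(1000);                      // sampling interval (1 ms here)
      }
      return NULL;
    }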
deps/v8/src/profile-generator-inl.h | 28 lines changed (vendored)

@@ -121,34 +121,6 @@ uint64_t HeapEntry::id() {
  return id_adaptor.returned_id;
}


template<class Visitor>
void HeapEntriesMap::UpdateEntries(Visitor* visitor) {
  for (HashMap::Entry* p = entries_.Start();
       p != NULL;
       p = entries_.Next(p)) {
    EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
    entry_info->entry = visitor->GetEntry(
        reinterpret_cast<HeapObject*>(p->key),
        entry_info->children_count,
        entry_info->retainers_count);
    entry_info->children_count = 0;
    entry_info->retainers_count = 0;
  }
}


bool HeapSnapshotGenerator::ReportProgress(bool force) {
  const int kProgressReportGranularity = 10000;
  if (control_ != NULL
      && (force || progress_counter_ % kProgressReportGranularity == 0)) {
    return
        control_->ReportProgressValue(progress_counter_, progress_total_) ==
        v8::ActivityControl::kContinue;
  }
  return true;
}

} }  // namespace v8::internal

#endif  // ENABLE_LOGGING_AND_PROFILING
775
deps/v8/src/profile-generator.cc
vendored
775
deps/v8/src/profile-generator.cc
vendored
@ -1177,12 +1177,6 @@ void HeapGraphPath::Print() {
|
||||
}
|
||||
|
||||
|
||||
HeapObject *const HeapSnapshot::kInternalRootObject =
|
||||
reinterpret_cast<HeapObject*>(1);
|
||||
HeapObject *const HeapSnapshot::kGcRootsObject =
|
||||
reinterpret_cast<HeapObject*>(2);
|
||||
|
||||
|
||||
// It is very important to keep objects that form a heap snapshot
|
||||
// as small as possible.
|
||||
namespace { // Avoid littering the global namespace.
|
||||
@ -1253,96 +1247,6 @@ void HeapSnapshot::AllocateEntries(int entries_count,
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
|
||||
int children_count,
|
||||
int retainers_count) {
|
||||
if (object == kInternalRootObject) {
|
||||
ASSERT(root_entry_ == NULL);
|
||||
ASSERT(retainers_count == 0);
|
||||
return (root_entry_ = AddEntry(HeapEntry::kObject,
|
||||
"",
|
||||
HeapObjectsMap::kInternalRootObjectId,
|
||||
0,
|
||||
children_count,
|
||||
retainers_count));
|
||||
} else if (object == kGcRootsObject) {
|
||||
ASSERT(gc_roots_entry_ == NULL);
|
||||
return (gc_roots_entry_ = AddEntry(HeapEntry::kObject,
|
||||
"(GC roots)",
|
||||
HeapObjectsMap::kGcRootsObjectId,
|
||||
0,
|
||||
children_count,
|
||||
retainers_count));
|
||||
} else if (object->IsJSFunction()) {
|
||||
JSFunction* func = JSFunction::cast(object);
|
||||
SharedFunctionInfo* shared = func->shared();
|
||||
return AddEntry(object,
|
||||
HeapEntry::kClosure,
|
||||
collection_->GetName(String::cast(shared->name())),
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsJSRegExp()) {
|
||||
JSRegExp* re = JSRegExp::cast(object);
|
||||
return AddEntry(object,
|
||||
HeapEntry::kRegExp,
|
||||
collection_->GetName(re->Pattern()),
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsJSObject()) {
|
||||
return AddEntry(object,
|
||||
HeapEntry::kObject,
|
||||
collection_->GetName(GetConstructorNameForHeapProfile(
|
||||
JSObject::cast(object))),
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsString()) {
|
||||
return AddEntry(object,
|
||||
HeapEntry::kString,
|
||||
collection_->GetName(String::cast(object)),
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsCode()) {
|
||||
return AddEntry(object,
|
||||
HeapEntry::kCode,
|
||||
"",
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsSharedFunctionInfo()) {
|
||||
SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
|
||||
return AddEntry(object,
|
||||
HeapEntry::kCode,
|
||||
collection_->GetName(String::cast(shared->name())),
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsScript()) {
|
||||
Script* script = Script::cast(object);
|
||||
return AddEntry(object,
|
||||
HeapEntry::kCode,
|
||||
script->name()->IsString() ?
|
||||
collection_->GetName(String::cast(script->name())) : "",
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsFixedArray()) {
|
||||
return AddEntry(object,
|
||||
HeapEntry::kArray,
|
||||
"",
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsHeapNumber()) {
|
||||
return AddEntry(object,
|
||||
HeapEntry::kHeapNumber,
|
||||
"number",
|
||||
children_count,
|
||||
retainers_count);
|
||||
}
|
||||
return AddEntry(object,
|
||||
HeapEntry::kHidden,
|
||||
"system",
|
||||
children_count,
|
||||
retainers_count);
|
||||
}
|
||||
|
||||
|
||||
static void HeapEntryClearPaint(HeapEntry** entry_ptr) {
|
||||
(*entry_ptr)->clear_paint();
|
||||
}
|
||||
@ -1352,17 +1256,26 @@ void HeapSnapshot::ClearPaint() {
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* HeapSnapshot::AddEntry(HeapObject* object,
|
||||
HeapEntry::Type type,
|
||||
const char* name,
|
||||
int children_count,
|
||||
int retainers_count) {
|
||||
return AddEntry(type,
|
||||
name,
|
||||
collection_->GetObjectId(object->address()),
|
||||
object->Size(),
|
||||
HeapEntry* HeapSnapshot::AddRootEntry(int children_count) {
|
||||
ASSERT(root_entry_ == NULL);
|
||||
return (root_entry_ = AddEntry(HeapEntry::kObject,
|
||||
"",
|
||||
HeapObjectsMap::kInternalRootObjectId,
|
||||
0,
|
||||
children_count,
|
||||
retainers_count);
|
||||
0));
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* HeapSnapshot::AddGcRootsEntry(int children_count,
|
||||
int retainers_count) {
|
||||
ASSERT(gc_roots_entry_ == NULL);
|
||||
return (gc_roots_entry_ = AddEntry(HeapEntry::kObject,
|
||||
"(GC roots)",
|
||||
HeapObjectsMap::kGcRootsObjectId,
|
||||
0,
|
||||
children_count,
|
||||
retainers_count));
|
||||
}
|
||||
|
||||
|
||||
@ -1615,7 +1528,7 @@ HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
|
||||
reinterpret_cast<HeapEntry*>(1);
|
||||
|
||||
HeapEntriesMap::HeapEntriesMap()
|
||||
: entries_(HeapObjectsMatch),
|
||||
: entries_(HeapThingsMatch),
|
||||
entries_count_(0),
|
||||
total_children_count_(0),
|
||||
total_retainers_count_(0) {
|
||||
@ -1629,8 +1542,23 @@ HeapEntriesMap::~HeapEntriesMap() {
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* HeapEntriesMap::Map(HeapObject* object) {
|
||||
HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), false);
|
||||
void HeapEntriesMap::AllocateEntries() {
|
||||
for (HashMap::Entry* p = entries_.Start();
|
||||
p != NULL;
|
||||
p = entries_.Next(p)) {
|
||||
EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(p->value);
|
||||
entry_info->entry = entry_info->allocator->AllocateEntry(
|
||||
p->key,
|
||||
entry_info->children_count,
|
||||
entry_info->retainers_count);
|
||||
entry_info->children_count = 0;
|
||||
entry_info->retainers_count = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* HeapEntriesMap::Map(HeapThing thing) {
|
||||
HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false);
|
||||
if (cache_entry != NULL) {
|
||||
EntryInfo* entry_info = reinterpret_cast<EntryInfo*>(cache_entry->value);
|
||||
return entry_info->entry;
|
||||
@ -1640,15 +1568,16 @@ HeapEntry* HeapEntriesMap::Map(HeapObject* object) {
|
||||
}
|
||||
|
||||
|
||||
void HeapEntriesMap::Pair(HeapObject* object, HeapEntry* entry) {
|
||||
HashMap::Entry* cache_entry = entries_.Lookup(object, Hash(object), true);
|
||||
void HeapEntriesMap::Pair(
|
||||
HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry) {
|
||||
HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
|
||||
ASSERT(cache_entry->value == NULL);
|
||||
cache_entry->value = new EntryInfo(entry);
|
||||
cache_entry->value = new EntryInfo(entry, allocator);
|
||||
++entries_count_;
|
||||
}
|
||||
|
||||
|
||||
void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to,
|
||||
void HeapEntriesMap::CountReference(HeapThing from, HeapThing to,
|
||||
int* prev_children_count,
|
||||
int* prev_retainers_count) {
|
||||
HashMap::Entry* from_cache_entry = entries_.Lookup(from, Hash(from), false);
|
||||
@ -1671,7 +1600,7 @@ void HeapEntriesMap::CountReference(HeapObject* from, HeapObject* to,
|
||||
|
||||
|
||||
HeapObjectsSet::HeapObjectsSet()
|
||||
: entries_(HeapEntriesMap::HeapObjectsMatch) {
|
||||
: entries_(HeapEntriesMap::HeapThingsMatch) {
|
||||
}
|
||||
|
||||
|
||||
@ -1700,206 +1629,144 @@ void HeapObjectsSet::Insert(Object* obj) {
|
||||
}
|
||||
|
||||
|
||||
HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
|
||||
v8::ActivityControl* control)
|
||||
HeapObject *const V8HeapExplorer::kInternalRootObject =
|
||||
reinterpret_cast<HeapObject*>(1);
|
||||
HeapObject *const V8HeapExplorer::kGcRootsObject =
|
||||
reinterpret_cast<HeapObject*>(2);
|
||||
|
||||
|
||||
V8HeapExplorer::V8HeapExplorer(
|
||||
HeapSnapshot* snapshot,
|
||||
SnapshottingProgressReportingInterface* progress)
|
||||
: snapshot_(snapshot),
|
||||
control_(control),
|
||||
collection_(snapshot->collection()),
|
||||
collection_(snapshot_->collection()),
|
||||
progress_(progress),
|
||||
filler_(NULL) {
|
||||
}
|
||||
|
||||
class SnapshotCounter : public HeapSnapshotGenerator::SnapshotFillerInterface {
|
||||
public:
|
||||
explicit SnapshotCounter(HeapEntriesMap* entries)
|
||||
: entries_(entries) { }
|
||||
HeapEntry* AddEntry(HeapObject* obj) {
|
||||
entries_->Pair(obj, HeapEntriesMap::kHeapEntryPlaceholder);
|
||||
return HeapEntriesMap::kHeapEntryPlaceholder;
|
||||
}
|
||||
void SetIndexedReference(HeapGraphEdge::Type,
|
||||
HeapObject* parent_obj,
|
||||
HeapEntry*,
|
||||
int,
|
||||
Object* child_obj,
|
||||
HeapEntry*) {
|
||||
entries_->CountReference(parent_obj, HeapObject::cast(child_obj));
|
||||
}
|
||||
void SetNamedReference(HeapGraphEdge::Type,
|
||||
HeapObject* parent_obj,
|
||||
HeapEntry*,
|
||||
const char*,
|
||||
Object* child_obj,
|
||||
HeapEntry*) {
|
||||
entries_->CountReference(parent_obj, HeapObject::cast(child_obj));
|
||||
}
|
||||
void SetRootShortcutReference(Object* child_obj, HeapEntry*) {
|
||||
entries_->CountReference(
|
||||
HeapSnapshot::kInternalRootObject, HeapObject::cast(child_obj));
|
||||
}
|
||||
void SetRootGcRootsReference() {
|
||||
entries_->CountReference(
|
||||
HeapSnapshot::kInternalRootObject, HeapSnapshot::kGcRootsObject);
|
||||
}
|
||||
void SetStrongRootReference(Object* child_obj, HeapEntry*) {
|
||||
entries_->CountReference(
|
||||
HeapSnapshot::kGcRootsObject, HeapObject::cast(child_obj));
|
||||
}
|
||||
private:
|
||||
HeapEntriesMap* entries_;
|
||||
};
|
||||
|
||||
|
||||
class SnapshotFiller : public HeapSnapshotGenerator::SnapshotFillerInterface {
|
||||
public:
|
||||
explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
|
||||
: snapshot_(snapshot),
|
||||
collection_(snapshot->collection()),
|
||||
entries_(entries) { }
|
||||
HeapEntry* AddEntry(HeapObject* obj) {
|
||||
UNREACHABLE();
|
||||
return NULL;
|
||||
}
|
||||
void SetIndexedReference(HeapGraphEdge::Type type,
|
||||
HeapObject* parent_obj,
|
||||
HeapEntry* parent_entry,
|
||||
int index,
|
||||
Object* child_obj,
|
||||
HeapEntry* child_entry) {
|
||||
int child_index, retainer_index;
|
||||
entries_->CountReference(parent_obj,
|
||||
HeapObject::cast(child_obj),
|
||||
&child_index,
|
||||
&retainer_index);
|
||||
parent_entry->SetIndexedReference(
|
||||
type, child_index, index, child_entry, retainer_index);
|
||||
}
|
||||
void SetNamedReference(HeapGraphEdge::Type type,
|
||||
HeapObject* parent_obj,
|
||||
HeapEntry* parent_entry,
|
||||
const char* reference_name,
|
||||
Object* child_obj,
|
||||
HeapEntry* child_entry) {
|
||||
int child_index, retainer_index;
|
||||
entries_->CountReference(parent_obj, HeapObject::cast(child_obj),
|
||||
&child_index, &retainer_index);
|
||||
parent_entry->SetNamedReference(type,
|
||||
child_index,
|
||||
reference_name,
|
||||
child_entry,
|
||||
retainer_index);
|
||||
}
|
||||
void SetRootGcRootsReference() {
|
||||
int child_index, retainer_index;
|
||||
entries_->CountReference(HeapSnapshot::kInternalRootObject,
|
||||
HeapSnapshot::kGcRootsObject,
|
||||
&child_index,
|
||||
&retainer_index);
|
||||
snapshot_->root()->SetIndexedReference(HeapGraphEdge::kElement,
|
||||
child_index,
|
||||
child_index + 1,
|
||||
snapshot_->gc_roots(),
|
||||
retainer_index);
|
||||
}
|
||||
void SetRootShortcutReference(Object* child_obj,
|
||||
HeapEntry* child_entry) {
|
||||
int child_index, retainer_index;
|
||||
entries_->CountReference(HeapSnapshot::kInternalRootObject,
|
||||
HeapObject::cast(child_obj),
|
||||
&child_index,
|
||||
&retainer_index);
|
||||
snapshot_->root()->SetNamedReference(HeapGraphEdge::kShortcut,
|
||||
child_index,
|
||||
collection_->GetName(child_index + 1),
|
||||
child_entry,
|
||||
retainer_index);
|
||||
}
|
||||
void SetStrongRootReference(Object* child_obj,
|
||||
HeapEntry* child_entry) {
|
||||
int child_index, retainer_index;
|
||||
entries_->CountReference(HeapSnapshot::kGcRootsObject,
|
||||
HeapObject::cast(child_obj),
|
||||
&child_index,
|
||||
&retainer_index);
|
||||
snapshot_->gc_roots()->SetIndexedReference(HeapGraphEdge::kElement,
|
||||
child_index,
|
||||
child_index + 1,
|
||||
child_entry,
|
||||
retainer_index);
|
||||
}
|
||||
private:
|
||||
HeapSnapshot* snapshot_;
|
||||
HeapSnapshotsCollection* collection_;
|
||||
HeapEntriesMap* entries_;
|
||||
};
|
||||
|
||||
class SnapshotAllocator {
|
||||
public:
|
||||
explicit SnapshotAllocator(HeapSnapshot* snapshot)
|
||||
: snapshot_(snapshot) { }
|
||||
HeapEntry* GetEntry(
|
||||
HeapObject* obj, int children_count, int retainers_count) {
|
||||
HeapEntry* entry =
|
||||
snapshot_->AddEntry(obj, children_count, retainers_count);
|
||||
ASSERT(entry != NULL);
|
||||
return entry;
|
||||
}
|
||||
private:
|
||||
HeapSnapshot* snapshot_;
|
||||
};
|
||||
|
||||
class RootsReferencesExtractor : public ObjectVisitor {
|
||||
public:
|
||||
explicit RootsReferencesExtractor(HeapSnapshotGenerator* generator)
|
||||
: generator_(generator) {
|
||||
}
|
||||
void VisitPointers(Object** start, Object** end) {
|
||||
for (Object** p = start; p < end; p++) generator_->SetGcRootsReference(*p);
|
||||
}
|
||||
private:
|
||||
HeapSnapshotGenerator* generator_;
|
||||
};
|
||||
|
||||
|
||||
bool HeapSnapshotGenerator::GenerateSnapshot() {
|
||||
AssertNoAllocation no_alloc;
|
||||
|
||||
SetProgressTotal(4); // 2 passes + dominators + sizes.
|
||||
|
||||
// Pass 1. Iterate heap contents to count entries and references.
|
||||
if (!CountEntriesAndReferences()) return false;
|
||||
|
||||
// Allocate and fill entries in the snapshot, allocate references.
|
||||
snapshot_->AllocateEntries(entries_.entries_count(),
|
||||
entries_.total_children_count(),
|
||||
entries_.total_retainers_count());
|
||||
SnapshotAllocator allocator(snapshot_);
|
||||
entries_.UpdateEntries(&allocator);
|
||||
|
||||
// Pass 2. Fill references.
|
||||
if (!FillReferences()) return false;
|
||||
|
||||
if (!SetEntriesDominators()) return false;
|
||||
if (!ApproximateRetainedSizes()) return false;
|
||||
|
||||
progress_counter_ = progress_total_;
|
||||
if (!ReportProgress(true)) return false;
|
||||
return true;
|
||||
V8HeapExplorer::~V8HeapExplorer() {
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* HeapSnapshotGenerator::GetEntry(Object* obj) {
|
||||
if (!obj->IsHeapObject()) return NULL;
|
||||
HeapObject* object = HeapObject::cast(obj);
|
||||
HeapEntry* entry = entries_.Map(object);
|
||||
// A new entry.
|
||||
if (entry == NULL) entry = filler_->AddEntry(object);
|
||||
return entry;
|
||||
HeapEntry* V8HeapExplorer::AllocateEntry(
|
||||
HeapThing ptr, int children_count, int retainers_count) {
|
||||
return AddEntry(
|
||||
reinterpret_cast<HeapObject*>(ptr), children_count, retainers_count);
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
|
||||
int children_count,
|
||||
int retainers_count) {
|
||||
if (object == kInternalRootObject) {
|
||||
ASSERT(retainers_count == 0);
|
||||
return snapshot_->AddRootEntry(children_count);
|
||||
} else if (object == kGcRootsObject) {
|
||||
return snapshot_->AddGcRootsEntry(children_count, retainers_count);
|
||||
} else if (object->IsJSFunction()) {
|
||||
JSFunction* func = JSFunction::cast(object);
|
||||
SharedFunctionInfo* shared = func->shared();
|
||||
return AddEntry(object,
|
||||
HeapEntry::kClosure,
|
||||
collection_->GetName(String::cast(shared->name())),
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsJSRegExp()) {
|
||||
JSRegExp* re = JSRegExp::cast(object);
|
||||
return AddEntry(object,
|
||||
HeapEntry::kRegExp,
|
||||
collection_->GetName(re->Pattern()),
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsJSObject()) {
|
||||
return AddEntry(object,
|
||||
HeapEntry::kObject,
|
||||
collection_->GetName(GetConstructorNameForHeapProfile(
|
||||
JSObject::cast(object))),
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsString()) {
|
||||
return AddEntry(object,
|
||||
HeapEntry::kString,
|
||||
collection_->GetName(String::cast(object)),
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsCode()) {
|
||||
return AddEntry(object,
|
||||
HeapEntry::kCode,
|
||||
"",
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsSharedFunctionInfo()) {
|
||||
SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
|
||||
return AddEntry(object,
|
||||
HeapEntry::kCode,
|
||||
collection_->GetName(String::cast(shared->name())),
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsScript()) {
|
||||
Script* script = Script::cast(object);
|
||||
return AddEntry(object,
|
||||
HeapEntry::kCode,
|
||||
script->name()->IsString() ?
|
||||
collection_->GetName(String::cast(script->name())) : "",
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsFixedArray()) {
|
||||
return AddEntry(object,
|
||||
HeapEntry::kArray,
|
||||
"",
|
||||
children_count,
|
||||
retainers_count);
|
||||
} else if (object->IsHeapNumber()) {
|
||||
return AddEntry(object,
|
||||
HeapEntry::kHeapNumber,
|
||||
"number",
|
||||
children_count,
|
||||
retainers_count);
|
||||
}
|
||||
return AddEntry(object,
|
||||
HeapEntry::kHidden,
|
||||
"system",
|
||||
children_count,
|
||||
retainers_count);
|
||||
}
|
||||
|
||||
|
||||
HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
|
||||
HeapEntry::Type type,
|
||||
const char* name,
|
||||
int children_count,
|
||||
int retainers_count) {
|
||||
return snapshot_->AddEntry(type,
|
||||
name,
|
||||
collection_->GetObjectId(object->address()),
|
||||
object->Size(),
|
||||
children_count,
|
||||
retainers_count);
|
||||
}
|
||||
|
||||
|
||||
void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
|
||||
filler->AddEntry(kInternalRootObject);
|
||||
filler->AddEntry(kGcRootsObject);
|
||||
}
|
||||
|
||||
|
||||
int V8HeapExplorer::EstimateObjectsCount() {
|
||||
HeapIterator iterator(HeapIterator::kFilterUnreachable);
|
||||
int objects_count = 0;
|
||||
for (HeapObject* obj = iterator.next();
|
||||
obj != NULL;
|
||||
obj = iterator.next(), ++objects_count) {}
|
||||
return objects_count;
|
||||
}
|
||||
|
||||
|
||||
class IndexedReferencesExtractor : public ObjectVisitor {
|
||||
public:
|
||||
IndexedReferencesExtractor(HeapSnapshotGenerator* generator,
|
||||
IndexedReferencesExtractor(V8HeapExplorer* generator,
|
||||
HeapObject* parent_obj,
|
||||
HeapEntry* parent_entry,
|
||||
HeapObjectsSet* known_references = NULL)
|
||||
@ -1917,7 +1784,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
|
||||
}
|
||||
}
|
||||
private:
|
||||
HeapSnapshotGenerator* generator_;
|
||||
V8HeapExplorer* generator_;
|
||||
HeapObject* parent_obj_;
|
||||
HeapEntry* parent_;
|
||||
HeapObjectsSet* known_references_;
|
||||
@ -1925,7 +1792,7 @@ class IndexedReferencesExtractor : public ObjectVisitor {
|
||||
};
|
||||
|
||||
|
||||
-void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) {
+void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
  HeapEntry* entry = GetEntry(obj);
  if (entry == NULL) return;  // No interest in this object.

@@ -1969,7 +1836,7 @@ void HeapSnapshotGenerator::ExtractReferences(HeapObject* obj) {
}


-void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj,
                                                     HeapEntry* entry) {
  if (js_obj->IsJSFunction()) {
    HandleScope hs;
@@ -1992,7 +1859,7 @@ void HeapSnapshotGenerator::ExtractClosureReferences(JSObject* js_obj,
}


-void HeapSnapshotGenerator::ExtractPropertyReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj,
                                                      HeapEntry* entry) {
  if (js_obj->HasFastProperties()) {
    DescriptorArray* descs = js_obj->map()->instance_descriptors();
@@ -2034,7 +1901,7 @@ void HeapSnapshotGenerator::ExtractPropertyReferences(JSObject* js_obj,
}


-void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj,
                                                     HeapEntry* entry) {
  if (js_obj->HasFastElements()) {
    FixedArray* elements = FixedArray::cast(js_obj->elements());
@@ -2061,7 +1928,7 @@ void HeapSnapshotGenerator::ExtractElementReferences(JSObject* js_obj,
}


-void HeapSnapshotGenerator::ExtractInternalReferences(JSObject* js_obj,
+void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj,
                                                      HeapEntry* entry) {
  int length = js_obj->GetInternalFieldCount();
  for (int i = 0; i < length; ++i) {
@@ -2071,7 +1938,52 @@ void HeapSnapshotGenerator::ExtractInternalReferences(JSObject* js_obj,
}


-void HeapSnapshotGenerator::SetClosureReference(HeapObject* parent_obj,
+HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
+  if (!obj->IsHeapObject()) return NULL;
+  return filler_->FindOrAddEntry(obj);
+}
+
+
+class RootsReferencesExtractor : public ObjectVisitor {
+ public:
+  explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
+      : explorer_(explorer) {
+  }
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) explorer_->SetGcRootsReference(*p);
+  }
+ private:
+  V8HeapExplorer* explorer_;
+};
+
+
+bool V8HeapExplorer::IterateAndExtractReferences(
+    SnapshotFillerInterface* filler) {
+  filler_ = filler;
+  HeapIterator iterator(HeapIterator::kFilterUnreachable);
+  bool interrupted = false;
+  // Heap iteration with filtering must be finished in any case.
+  for (HeapObject* obj = iterator.next();
+       obj != NULL;
+       obj = iterator.next(), progress_->ProgressStep()) {
+    if (!interrupted) {
+      ExtractReferences(obj);
+      if (!progress_->ProgressReport(false)) interrupted = true;
+    }
+  }
+  if (interrupted) {
+    filler_ = NULL;
+    return false;
+  }
+  SetRootGcRootsReference();
+  RootsReferencesExtractor extractor(this);
+  Heap::IterateRoots(&extractor, VISIT_ALL);
+  filler_ = NULL;
+  return progress_->ProgressReport(false);
+}


+void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj,
                                         HeapEntry* parent_entry,
                                         String* reference_name,
                                         Object* child_obj) {
@@ -2088,7 +2000,7 @@ void HeapSnapshotGenerator::SetClosureReference(HeapObject* parent_obj,
}


-void HeapSnapshotGenerator::SetElementReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetElementReference(HeapObject* parent_obj,
                                         HeapEntry* parent_entry,
                                         int index,
                                         Object* child_obj) {
@@ -2105,7 +2017,7 @@ void HeapSnapshotGenerator::SetElementReference(HeapObject* parent_obj,
}


-void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
                                          HeapEntry* parent_entry,
                                          const char* reference_name,
                                          Object* child_obj) {
@@ -2122,7 +2034,7 @@ void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj,
}


-void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj,
                                          HeapEntry* parent_entry,
                                          int index,
                                          Object* child_obj) {
@@ -2139,7 +2051,7 @@ void HeapSnapshotGenerator::SetInternalReference(HeapObject* parent_obj,
}


-void HeapSnapshotGenerator::SetHiddenReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
                                        HeapEntry* parent_entry,
                                        int index,
                                        Object* child_obj) {
@@ -2155,7 +2067,7 @@ void HeapSnapshotGenerator::SetHiddenReference(HeapObject* parent_obj,
}


-void HeapSnapshotGenerator::SetPropertyReference(HeapObject* parent_obj,
+void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
                                          HeapEntry* parent_entry,
                                          String* reference_name,
                                          Object* child_obj) {
@@ -2174,7 +2086,7 @@ void HeapSnapshotGenerator::SetPropertyReference(HeapObject* parent_obj,
}


-void HeapSnapshotGenerator::SetPropertyShortcutReference(
+void V8HeapExplorer::SetPropertyShortcutReference(
    HeapObject* parent_obj,
    HeapEntry* parent_entry,
    String* reference_name,
@@ -2191,52 +2103,221 @@ void HeapSnapshotGenerator::SetPropertyShortcutReference(
}


-void HeapSnapshotGenerator::SetRootGcRootsReference() {
-  filler_->SetRootGcRootsReference();
+void V8HeapExplorer::SetRootGcRootsReference() {
+  filler_->SetIndexedAutoIndexReference(
+      HeapGraphEdge::kElement,
+      kInternalRootObject, snapshot_->root(),
+      kGcRootsObject, snapshot_->gc_roots());
}


-void HeapSnapshotGenerator::SetRootShortcutReference(Object* child_obj) {
+void V8HeapExplorer::SetRootShortcutReference(Object* child_obj) {
  HeapEntry* child_entry = GetEntry(child_obj);
  ASSERT(child_entry != NULL);
-  filler_->SetRootShortcutReference(child_obj, child_entry);
+  filler_->SetNamedAutoIndexReference(
+      HeapGraphEdge::kShortcut,
+      kInternalRootObject, snapshot_->root(),
+      child_obj, child_entry);
}


-void HeapSnapshotGenerator::SetGcRootsReference(Object* child_obj) {
+void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
  HeapEntry* child_entry = GetEntry(child_obj);
  if (child_entry != NULL) {
-    filler_->SetStrongRootReference(child_obj, child_entry);
+    filler_->SetIndexedAutoIndexReference(
+        HeapGraphEdge::kElement,
+        kGcRootsObject, snapshot_->gc_roots(),
+        child_obj, child_entry);
  }
}


HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
                                             v8::ActivityControl* control)
    : snapshot_(snapshot),
      control_(control),
      v8_heap_explorer_(snapshot_, this) {
}


class SnapshotCounter : public SnapshotFillerInterface {
 public:
  SnapshotCounter(HeapEntriesAllocator* allocator, HeapEntriesMap* entries)
      : allocator_(allocator), entries_(entries) { }
  HeapEntry* AddEntry(HeapThing ptr) {
    entries_->Pair(ptr, allocator_, HeapEntriesMap::kHeapEntryPlaceholder);
    return HeapEntriesMap::kHeapEntryPlaceholder;
  }
  HeapEntry* FindOrAddEntry(HeapThing ptr) {
    HeapEntry* entry = entries_->Map(ptr);
    return entry != NULL ? entry : AddEntry(ptr);
  }
  void SetIndexedReference(HeapGraphEdge::Type,
                           HeapThing parent_ptr,
                           HeapEntry*,
                           int,
                           HeapThing child_ptr,
                           HeapEntry*) {
    entries_->CountReference(parent_ptr, child_ptr);
  }
  void SetIndexedAutoIndexReference(HeapGraphEdge::Type,
                                    HeapThing parent_ptr,
                                    HeapEntry*,
                                    HeapThing child_ptr,
                                    HeapEntry*) {
    entries_->CountReference(parent_ptr, child_ptr);
  }
  void SetNamedReference(HeapGraphEdge::Type,
                         HeapThing parent_ptr,
                         HeapEntry*,
                         const char*,
                         HeapThing child_ptr,
                         HeapEntry*) {
    entries_->CountReference(parent_ptr, child_ptr);
  }
  void SetNamedAutoIndexReference(HeapGraphEdge::Type,
                                  HeapThing parent_ptr,
                                  HeapEntry*,
                                  HeapThing child_ptr,
                                  HeapEntry*) {
    entries_->CountReference(parent_ptr, child_ptr);
  }
 private:
  HeapEntriesAllocator* allocator_;
  HeapEntriesMap* entries_;
};


class SnapshotFiller : public SnapshotFillerInterface {
 public:
  explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries)
      : snapshot_(snapshot),
        collection_(snapshot->collection()),
        entries_(entries) { }
  HeapEntry* AddEntry(HeapThing ptr) {
    UNREACHABLE();
    return NULL;
  }
  HeapEntry* FindOrAddEntry(HeapThing ptr) {
    HeapEntry* entry = entries_->Map(ptr);
    return entry != NULL ? entry : AddEntry(ptr);
  }
  void SetIndexedReference(HeapGraphEdge::Type type,
                           HeapThing parent_ptr,
                           HeapEntry* parent_entry,
                           int index,
                           HeapThing child_ptr,
                           HeapEntry* child_entry) {
    int child_index, retainer_index;
    entries_->CountReference(
        parent_ptr, child_ptr, &child_index, &retainer_index);
    parent_entry->SetIndexedReference(
        type, child_index, index, child_entry, retainer_index);
  }
  void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
                                    HeapThing parent_ptr,
                                    HeapEntry* parent_entry,
                                    HeapThing child_ptr,
                                    HeapEntry* child_entry) {
    int child_index, retainer_index;
    entries_->CountReference(
        parent_ptr, child_ptr, &child_index, &retainer_index);
    parent_entry->SetIndexedReference(
        type, child_index, child_index + 1, child_entry, retainer_index);
  }
  void SetNamedReference(HeapGraphEdge::Type type,
                         HeapThing parent_ptr,
                         HeapEntry* parent_entry,
                         const char* reference_name,
                         HeapThing child_ptr,
                         HeapEntry* child_entry) {
    int child_index, retainer_index;
    entries_->CountReference(
        parent_ptr, child_ptr, &child_index, &retainer_index);
    parent_entry->SetNamedReference(
        type, child_index, reference_name, child_entry, retainer_index);
  }
  void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
                                  HeapThing parent_ptr,
                                  HeapEntry* parent_entry,
                                  HeapThing child_ptr,
                                  HeapEntry* child_entry) {
    int child_index, retainer_index;
    entries_->CountReference(
        parent_ptr, child_ptr, &child_index, &retainer_index);
    parent_entry->SetNamedReference(type,
                                    child_index,
                                    collection_->GetName(child_index + 1),
                                    child_entry,
                                    retainer_index);
  }
 private:
  HeapSnapshot* snapshot_;
  HeapSnapshotsCollection* collection_;
  HeapEntriesMap* entries_;
};


bool HeapSnapshotGenerator::GenerateSnapshot() {
  AssertNoAllocation no_alloc;

  SetProgressTotal(4);  // 2 passes + dominators + sizes.

  // Pass 1. Iterate heap contents to count entries and references.
  if (!CountEntriesAndReferences()) return false;

  // Allocate and fill entries in the snapshot, allocate references.
  snapshot_->AllocateEntries(entries_.entries_count(),
                             entries_.total_children_count(),
                             entries_.total_retainers_count());
  entries_.AllocateEntries();

  // Pass 2. Fill references.
  if (!FillReferences()) return false;

  if (!SetEntriesDominators()) return false;
  if (!ApproximateRetainedSizes()) return false;

  progress_counter_ = progress_total_;
  if (!ProgressReport(true)) return false;
  return true;
}


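A note on the structure above: GenerateSnapshot is deliberately two-pass. The first pass only counts entries and references so that the snapshot's node, edge, and retainer arrays can be allocated exactly once at their final sizes; the second pass revisits the same objects and fills the preallocated slots. A minimal standalone sketch of the same count-then-fill idiom; the GraphBuilder name and int payloads are illustrative assumptions, not V8 types:

    #include <cassert>
    #include <vector>

    class GraphBuilder {
     public:
      GraphBuilder() : node_count_(0) { }
      // Pass 1: visit everything once, only counting.
      void CountNode() { ++node_count_; }
      // Allocate storage exactly once, at the final size.
      void Allocate() { nodes_.reserve(node_count_); }
      // Pass 2: visit everything again, filling the slots.
      void FillNode(int payload) {
        assert(static_cast<int>(nodes_.size()) < node_count_);
        nodes_.push_back(payload);
      }
     private:
      int node_count_;
      std::vector<int> nodes_;
    };

Both passes must observe the heap identically, which is why the real code iterates with the same HeapIterator filter each time and forbids allocation (AssertNoAllocation) while the passes run.
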
void HeapSnapshotGenerator::ProgressStep() {
  ++progress_counter_;
}


bool HeapSnapshotGenerator::ProgressReport(bool force) {
  const int kProgressReportGranularity = 10000;
  if (control_ != NULL
      && (force || progress_counter_ % kProgressReportGranularity == 0)) {
    return
        control_->ReportProgressValue(progress_counter_, progress_total_) ==
        v8::ActivityControl::kContinue;
  }
  return true;
}


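ProgressReport above throttles embedder callbacks: unless forced, it only consults the ActivityControl once every 10000 steps, so the per-object cost of progress tracking stays near zero while still letting the embedder cancel a long-running snapshot. The same idiom in isolation; the ProgressSink typedef is a hypothetical stand-in for v8::ActivityControl, not the real interface:

    #include <cstddef>

    typedef bool (*ProgressSink)(int done, int total);  // returns false to abort

    class ThrottledProgress {
     public:
      ThrottledProgress(ProgressSink sink, int total)
          : sink_(sink), total_(total), counter_(0) { }
      void Step() { ++counter_; }
      bool Report(bool force) {
        const int kGranularity = 10000;  // same granularity as above
        if (sink_ != NULL && (force || counter_ % kGranularity == 0)) {
          return sink_(counter_, total_);
        }
        return true;  // keep going between report points
      }
     private:
      ProgressSink sink_;
      int total_;
      int counter_;
    };
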
void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) {
  if (control_ == NULL) return;

-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
-  int objects_count = 0;
-  for (HeapObject* obj = iterator.next();
-       obj != NULL;
-       obj = iterator.next(), ++objects_count) {}
-  progress_total_ = objects_count * iterations_count;
+  progress_total_ = v8_heap_explorer_.EstimateObjectsCount() * iterations_count;
  progress_counter_ = 0;
}


bool HeapSnapshotGenerator::CountEntriesAndReferences() {
-  SnapshotCounter counter(&entries_);
-  filler_ = &counter;
-  filler_->AddEntry(HeapSnapshot::kInternalRootObject);
-  filler_->AddEntry(HeapSnapshot::kGcRootsObject);
-  return IterateAndExtractReferences();
+  SnapshotCounter counter(&v8_heap_explorer_, &entries_);
+  v8_heap_explorer_.AddRootEntries(&counter);
+  return v8_heap_explorer_.IterateAndExtractReferences(&counter);
}


bool HeapSnapshotGenerator::FillReferences() {
  SnapshotFiller filler(snapshot_, &entries_);
-  filler_ = &filler;
-  return IterateAndExtractReferences();
+  return v8_heap_explorer_.IterateAndExtractReferences(&filler);
}


@@ -2322,7 +2403,7 @@ bool HeapSnapshotGenerator::BuildDominatorTree(
    int remaining = entries_length - changed;
    if (remaining < 0) remaining = 0;
    progress_counter_ = base_progress_counter + remaining;
-    if (!ReportProgress(true)) return false;
+    if (!ProgressReport(true)) return false;
  }
  return true;
}
@@ -2352,7 +2433,7 @@ bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
  }
  for (int i = 0;
       i < snapshot_->entries()->length();
-       ++i, IncProgressCounter()) {
+       ++i, ProgressStep()) {
    HeapEntry* entry = snapshot_->entries()->at(i);
    int entry_size = entry->self_size();
    for (HeapEntry* dominator = entry->dominator();
@@ -2360,32 +2441,12 @@ bool HeapSnapshotGenerator::ApproximateRetainedSizes() {
         entry = dominator, dominator = entry->dominator()) {
      dominator->add_retained_size(entry_size);
    }
-    if (!ReportProgress()) return false;
+    if (!ProgressReport()) return false;
  }
  return true;
}


-bool HeapSnapshotGenerator::IterateAndExtractReferences() {
-  HeapIterator iterator(HeapIterator::kFilterUnreachable);
-  bool interrupted = false;
-  // Heap iteration with filtering must be finished in any case.
-  for (HeapObject* obj = iterator.next();
-       obj != NULL;
-       obj = iterator.next(), IncProgressCounter()) {
-    if (!interrupted) {
-      ExtractReferences(obj);
-      if (!ReportProgress()) interrupted = true;
-    }
-  }
-  if (interrupted) return false;
-  SetRootGcRootsReference();
-  RootsReferencesExtractor extractor(this);
-  Heap::IterateRoots(&extractor, VISIT_ALL);
-  return ReportProgress();
-}


void HeapSnapshotsDiff::CreateRoots(int additions_count, int deletions_count) {
  raw_additions_root_ =
      NewArray<char>(HeapEntry::EntriesSize(1, additions_count, 0));

deps/v8/src/profile-generator.h (168 lines changed, vendored)

@@ -681,14 +681,14 @@ class HeapSnapshot {

  void AllocateEntries(
      int entries_count, int children_count, int retainers_count);
-  HeapEntry* AddEntry(
-      HeapObject* object, int children_count, int retainers_count);
  HeapEntry* AddEntry(HeapEntry::Type type,
                      const char* name,
                      uint64_t id,
                      int size,
                      int children_count,
                      int retainers_count);
+  HeapEntry* AddRootEntry(int children_count);
+  HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
  void ClearPaint();
  HeapSnapshotsDiff* CompareWith(HeapSnapshot* snapshot);
  HeapEntry* GetEntryById(uint64_t id);
@@ -701,15 +701,7 @@ class HeapSnapshot {
  void Print(int max_depth);
  void PrintEntriesSize();

-  static HeapObject* const kInternalRootObject;
-  static HeapObject* const kGcRootsObject;
-
 private:
-  HeapEntry* AddEntry(HeapObject* object,
-                      HeapEntry::Type type,
-                      const char* name,
-                      int children_count,
-                      int retainers_count);
  HeapEntry* GetNextEntryToInit();

  HeapSnapshotsCollection* collection_;
@@ -873,6 +865,20 @@ class HeapSnapshotsCollection {
};


+// A typedef for referencing anything that can be snapshotted living
+// in any kind of heap memory.
+typedef void* HeapThing;
+
+
+// An interface that creates HeapEntries by HeapThings.
+class HeapEntriesAllocator {
+ public:
+  virtual ~HeapEntriesAllocator() { }
+  virtual HeapEntry* AllocateEntry(
+      HeapThing ptr, int children_count, int retainers_count) = 0;
+};
+
+
// The HeapEntriesMap instance is used to track a mapping between
// real heap objects and their representations in heap snapshots.
class HeapEntriesMap {
@@ -880,13 +886,12 @@ class HeapEntriesMap {
  HeapEntriesMap();
  ~HeapEntriesMap();

-  HeapEntry* Map(HeapObject* object);
-  void Pair(HeapObject* object, HeapEntry* entry);
-  void CountReference(HeapObject* from, HeapObject* to,
+  void AllocateEntries();
+  HeapEntry* Map(HeapThing thing);
+  void Pair(HeapThing thing, HeapEntriesAllocator* allocator, HeapEntry* entry);
+  void CountReference(HeapThing from, HeapThing to,
                      int* prev_children_count = NULL,
                      int* prev_retainers_count = NULL);
-  template<class Visitor>
-  void UpdateEntries(Visitor* visitor);

  int entries_count() { return entries_count_; }
  int total_children_count() { return total_children_count_; }
@@ -896,18 +901,25 @@ class HeapEntriesMap {

 private:
  struct EntryInfo {
-    explicit EntryInfo(HeapEntry* entry)
-        : entry(entry), children_count(0), retainers_count(0) { }
+    EntryInfo(HeapEntry* entry, HeapEntriesAllocator* allocator)
+        : entry(entry),
+          allocator(allocator),
+          children_count(0),
+          retainers_count(0) {
+    }
    HeapEntry* entry;
+    HeapEntriesAllocator* allocator;
    int children_count;
    int retainers_count;
  };

-  static uint32_t Hash(HeapObject* object) {
+  static uint32_t Hash(HeapThing thing) {
    return ComputeIntegerHash(
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(object)));
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)));
  }
+  static bool HeapThingsMatch(HeapThing key1, HeapThing key2) {
+    return key1 == key2;
+  }
-  static bool HeapObjectsMatch(void* key1, void* key2) { return key1 == key2; }

  HashMap entries_;
  int entries_count_;
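The HeapEntriesMap changes above preserve one important invariant: keys are raw addresses (now opaque HeapThing pointers rather than HeapObject*), hashed by their bit pattern and compared by identity. A self-contained sketch of that keying scheme; the multiplicative mix is an illustrative assumption standing in for V8's ComputeIntegerHash:

    #include <stdint.h>

    // Hash the address bits themselves; the pointer is the key.
    static uint32_t HashThing(void* thing) {
      uint32_t bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing));
      return bits * 2654435761u;  // Knuth-style multiplicative mix (assumption)
    }

    // Equality is pointer identity, exactly like HeapThingsMatch above.
    static bool ThingsMatch(void* key1, void* key2) {
      return key1 == key2;
    }

Identity keying is what makes the map usable during snapshotting: no object contents are read, so the table works uniformly for type-erased things that are not real heap objects.
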
@@ -934,52 +946,70 @@ class HeapObjectsSet {
};


class HeapSnapshotGenerator {
 public:
  // An interface used to populate a snapshot with nodes and edges.
  class SnapshotFillerInterface {
   public:
    virtual ~SnapshotFillerInterface() { }
-    virtual HeapEntry* AddEntry(HeapObject* obj) = 0;
+    virtual HeapEntry* AddEntry(HeapThing ptr) = 0;
+    virtual HeapEntry* FindOrAddEntry(HeapThing ptr) = 0;
    virtual void SetIndexedReference(HeapGraphEdge::Type type,
-                                     HeapObject* parent_obj,
+                                     HeapThing parent_ptr,
                                     HeapEntry* parent_entry,
                                     int index,
-                                     Object* child_obj,
+                                     HeapThing child_ptr,
                                     HeapEntry* child_entry) = 0;
+    virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type,
+                                              HeapThing parent_ptr,
+                                              HeapEntry* parent_entry,
+                                              HeapThing child_ptr,
+                                              HeapEntry* child_entry) = 0;
    virtual void SetNamedReference(HeapGraphEdge::Type type,
-                                   HeapObject* parent_obj,
+                                   HeapThing parent_ptr,
                                   HeapEntry* parent_entry,
                                   const char* reference_name,
-                                   Object* child_obj,
+                                   HeapThing child_ptr,
                                   HeapEntry* child_entry) = 0;
-    virtual void SetRootGcRootsReference() = 0;
-    virtual void SetRootShortcutReference(Object* child_obj,
-                                          HeapEntry* child_entry) = 0;
-    virtual void SetStrongRootReference(Object* child_obj,
+    virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type,
+                                            HeapThing parent_ptr,
+                                            HeapEntry* parent_entry,
+                                            HeapThing child_ptr,
+                                            HeapEntry* child_entry) = 0;
  };

  HeapSnapshotGenerator(HeapSnapshot* snapshot,
                        v8::ActivityControl* control);
  bool GenerateSnapshot();


class SnapshottingProgressReportingInterface {
 public:
  virtual ~SnapshottingProgressReportingInterface() { }
  virtual void ProgressStep() = 0;
  virtual bool ProgressReport(bool force) = 0;
};


// An implementation of V8 heap graph extractor.
class V8HeapExplorer : public HeapEntriesAllocator {
 public:
  V8HeapExplorer(HeapSnapshot* snapshot,
                 SnapshottingProgressReportingInterface* progress);
  ~V8HeapExplorer();
  virtual HeapEntry* AllocateEntry(
      HeapThing ptr, int children_count, int retainers_count);
  void AddRootEntries(SnapshotFillerInterface* filler);
  int EstimateObjectsCount();
  bool IterateAndExtractReferences(SnapshotFillerInterface* filler);

 private:
  bool ApproximateRetainedSizes();
  bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
                          Vector<HeapEntry*>* dominators);
  bool CountEntriesAndReferences();
  HeapEntry* GetEntry(Object* obj);
  void IncProgressCounter() { ++progress_counter_; }
  HeapEntry* AddEntry(
      HeapObject* object, int children_count, int retainers_count);
  HeapEntry* AddEntry(HeapObject* object,
                      HeapEntry::Type type,
                      const char* name,
                      int children_count,
                      int retainers_count);
  void ExtractReferences(HeapObject* obj);
  void ExtractClosureReferences(JSObject* js_obj, HeapEntry* entry);
  void ExtractPropertyReferences(JSObject* js_obj, HeapEntry* entry);
  void ExtractElementReferences(JSObject* js_obj, HeapEntry* entry);
  void ExtractInternalReferences(JSObject* js_obj, HeapEntry* entry);
  bool FillReferences();
  void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
  bool IterateAndExtractReferences();
  inline bool ReportProgress(bool force = false);
  bool SetEntriesDominators();
  void SetClosureReference(HeapObject* parent_obj,
                           HeapEntry* parent,
                           String* reference_name,
@@ -1011,24 +1041,54 @@ class HeapSnapshotGenerator {
  void SetRootShortcutReference(Object* child);
  void SetRootGcRootsReference();
  void SetGcRootsReference(Object* child);

  HeapEntry* GetEntry(Object* obj);

  HeapSnapshot* snapshot_;
  HeapSnapshotsCollection* collection_;
  SnapshottingProgressReportingInterface* progress_;
  // Used during references extraction to mark heap objects that
  // are references via non-hidden properties.
  HeapObjectsSet known_references_;
  SnapshotFillerInterface* filler_;

  static HeapObject* const kInternalRootObject;
  static HeapObject* const kGcRootsObject;

  friend class IndexedReferencesExtractor;
  friend class RootsReferencesExtractor;

  DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
};


class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
 public:
  HeapSnapshotGenerator(HeapSnapshot* snapshot,
                        v8::ActivityControl* control);
  bool GenerateSnapshot();

 private:
  bool ApproximateRetainedSizes();
  bool BuildDominatorTree(const Vector<HeapEntry*>& entries,
                          Vector<HeapEntry*>* dominators);
  bool CountEntriesAndReferences();
  bool FillReferences();
  void FillReversePostorderIndexes(Vector<HeapEntry*>* entries);
  void ProgressStep();
  bool ProgressReport(bool force = false);
  bool SetEntriesDominators();
  void SetProgressTotal(int iterations_count);

  HeapSnapshot* snapshot_;
  v8::ActivityControl* control_;
  HeapSnapshotsCollection* collection_;
-  // Mapping from HeapObject* pointers to HeapEntry* pointers.
+  V8HeapExplorer v8_heap_explorer_;
+  // Mapping from HeapThing pointers to HeapEntry* pointers.
  HeapEntriesMap entries_;
-  SnapshotFillerInterface* filler_;
-  // Used during references extraction to mark heap objects that
-  // are references via non-hidden properties.
-  HeapObjectsSet known_references_;
  // Used during snapshot generation.
  int progress_counter_;
  int progress_total_;

-  friend class IndexedReferencesExtractor;
-  friend class RootsReferencesExtractor;
-
  DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator);
};


deps/v8/src/runtime-profiler.cc (71 lines changed, vendored)

@@ -35,6 +35,7 @@
#include "deoptimizer.h"
#include "execution.h"
#include "global-handles.h"
+#include "mark-compact.h"
#include "scopeinfo.h"
#include "top.h"

@@ -100,11 +101,6 @@ static int sampler_ticks_until_threshold_adjustment =
// The ratio of ticks spent in JS code in percent.
static Atomic32 js_ratio;

-// The JSFunctions in the sampler window are not GC safe. Old-space
-// pointers are not cleared during mark-sweep collection and therefore
-// the window might contain stale pointers. The window is updated on
-// scavenges and (parts of it) cleared on mark-sweep and
-// mark-sweep-compact.
static Object* sampler_window[kSamplerWindowSize] = { NULL, };
static int sampler_window_position = 0;
static int sampler_window_weight[kSamplerWindowSize] = { 0, };

@@ -134,7 +130,6 @@ void PendingListNode::WeakCallback(v8::Persistent<v8::Value>, void* data) {


static bool IsOptimizable(JSFunction* function) {
-  if (Heap::InNewSpace(function)) return false;
  Code* code = function->code();
  return code->kind() == Code::FUNCTION && code->optimizable();
}

@@ -208,16 +203,6 @@ static void ClearSampleBuffer() {
}


-static void ClearSampleBufferNewSpaceEntries() {
-  for (int i = 0; i < kSamplerWindowSize; i++) {
-    if (Heap::InNewSpace(sampler_window[i])) {
-      sampler_window[i] = NULL;
-      sampler_window_weight[i] = 0;
-    }
-  }
-}
-
-
static int LookupSample(JSFunction* function) {
  int weight = 0;
  for (int i = 0; i < kSamplerWindowSize; i++) {

@@ -372,24 +357,6 @@ void RuntimeProfiler::NotifyTick() {
}


-void RuntimeProfiler::MarkCompactPrologue(bool is_compacting) {
-  if (is_compacting) {
-    // Clear all samples before mark-sweep-compact because every
-    // function might move.
-    ClearSampleBuffer();
-  } else {
-    // Clear only new space entries on mark-sweep since none of the
-    // old-space functions will move.
-    ClearSampleBufferNewSpaceEntries();
-  }
-}
-
-
bool IsEqual(void* first, void* second) {
  return first == second;
}


void RuntimeProfiler::Setup() {
  ClearSampleBuffer();
  // If the ticker hasn't already started, make sure to do so to get

@@ -411,13 +378,41 @@ void RuntimeProfiler::TearDown() {
}


-Object** RuntimeProfiler::SamplerWindowAddress() {
-  return sampler_window;
+int RuntimeProfiler::SamplerWindowSize() {
+  return kSamplerWindowSize;
}


-int RuntimeProfiler::SamplerWindowSize() {
-  return kSamplerWindowSize;
+// Update the pointers in the sampler window after a GC.
+void RuntimeProfiler::UpdateSamplesAfterScavenge() {
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    Object* function = sampler_window[i];
+    if (function != NULL && Heap::InNewSpace(function)) {
+      MapWord map_word = HeapObject::cast(function)->map_word();
+      if (map_word.IsForwardingAddress()) {
+        sampler_window[i] = map_word.ToForwardingAddress();
+      } else {
+        sampler_window[i] = NULL;
+      }
+    }
+  }
}


+void RuntimeProfiler::RemoveDeadSamples() {
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    Object* function = sampler_window[i];
+    if (function != NULL && !HeapObject::cast(function)->IsMarked()) {
+      sampler_window[i] = NULL;
+    }
+  }
+}
+
+
+void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) {
+  for (int i = 0; i < kSamplerWindowSize; i++) {
+    visitor->VisitPointer(&sampler_window[i]);
+  }
+}

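UpdateSamplesAfterScavenge above is the classic post-scavenge repair for a weak cache: a new-space object that survived the copying collection leaves a forwarding address in its map word, and anything else must be dropped as dead. Sketched outside of V8, with the three helpers declared as hypothetical stand-ins for the Heap::InNewSpace and MapWord queries:

    // Hypothetical helpers, assumptions standing in for V8's map-word API.
    bool InNewSpace(void* obj);
    bool HasForwardingAddress(void* obj);
    void* ForwardingAddress(void* obj);

    // Repair one weak slot after a scavenge (copying GC of new space).
    void UpdateWeakSlot(void** slot) {
      void* obj = *slot;
      if (obj == 0 || !InNewSpace(obj)) return;  // old space did not move
      if (HasForwardingAddress(obj)) {
        *slot = ForwardingAddress(obj);  // survivor: follow the forwarding word
      } else {
        *slot = 0;                       // dead: drop the stale pointer
      }
    }

RemoveDeadSamples does the mark-sweep counterpart (drop unmarked entries), and UpdateSamplesAfterCompact hands each slot to the compactor's visitor; together they replace the old MarkCompactPrologue clearing strategy.
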
deps/v8/src/runtime-profiler.h (5 lines changed, vendored)

@@ -47,9 +47,10 @@ class RuntimeProfiler : public AllStatic {
  static void Reset();
  static void TearDown();

-  static void MarkCompactPrologue(bool is_compacting);
-  static Object** SamplerWindowAddress();
  static int SamplerWindowSize();
+  static void UpdateSamplesAfterScavenge();
+  static void RemoveDeadSamples();
+  static void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
};

deps/v8/src/runtime.cc (996 lines changed, vendored): file diff suppressed because it is too large.

deps/v8/src/runtime.h (27 lines changed, vendored)

@@ -241,7 +241,7 @@ namespace internal {
  F(ResolvePossiblyDirectEval, 4, 2) \
  F(ResolvePossiblyDirectEvalNoLookup, 4, 2) \
  \
-  F(SetProperty, -1 /* 3 or 4 */, 1) \
+  F(SetProperty, -1 /* 4 or 5 */, 1) \
  F(DefineOrRedefineDataProperty, 4, 1) \
  F(DefineOrRedefineAccessorProperty, 5, 1) \
  F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
@@ -288,12 +288,12 @@ namespace internal {
  F(DeleteContextSlot, 2, 1) \
  F(LoadContextSlot, 2, 2) \
  F(LoadContextSlotNoReferenceError, 2, 2) \
-  F(StoreContextSlot, 3, 1) \
+  F(StoreContextSlot, 4, 1) \
  \
  /* Declarations and initialization */ \
-  F(DeclareGlobals, 3, 1) \
+  F(DeclareGlobals, 4, 1) \
  F(DeclareContextSlot, 4, 1) \
-  F(InitializeVarGlobal, -1 /* 1 or 2 */, 1) \
+  F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
  F(InitializeConstGlobal, 2, 1) \
  F(InitializeConstContextSlot, 3, 1) \
  F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
@@ -376,7 +376,21 @@ namespace internal {
  \
  F(SetFlags, 1, 1) \
  F(CollectGarbage, 1, 1) \
-  F(GetHeapUsage, 0, 1)
+  F(GetHeapUsage, 0, 1) \
+  \
+  /* LiveObjectList support*/ \
+  F(HasLOLEnabled, 0, 1) \
+  F(CaptureLOL, 0, 1) \
+  F(DeleteLOL, 1, 1) \
+  F(DumpLOL, 5, 1) \
+  F(GetLOLObj, 1, 1) \
+  F(GetLOLObjId, 1, 1) \
+  F(GetLOLObjRetainers, 6, 1) \
+  F(GetLOLPath, 3, 1) \
+  F(InfoLOL, 2, 1) \
+  F(PrintLOLObj, 1, 1) \
+  F(ResetLOL, 0, 1) \
+  F(SummarizeLOL, 3, 1)

#else
#define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F)
@@ -538,7 +552,8 @@ class Runtime : public AllStatic {
      Handle<Object> object,
      Handle<Object> key,
      Handle<Object> value,
-      PropertyAttributes attr);
+      PropertyAttributes attr,
+      StrictModeFlag strict);

  MUST_USE_RESULT static MaybeObject* ForceSetObjectProperty(
      Handle<JSObject> object,

deps/v8/src/spaces.h (6 lines changed, vendored)

@@ -2121,6 +2121,12 @@ class MapSpace : public FixedSpace {
    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
    accounting_stats_.AllocateBytes(new_size);

+    // Flush allocation watermarks.
+    for (Page* p = first_page_; p != top_page; p = p->next_page()) {
+      p->SetAllocationWatermark(p->AllocationTop());
+    }
+    top_page->SetAllocationWatermark(new_top);
+
#ifdef DEBUG
    if (FLAG_enable_slow_asserts) {
      intptr_t actual_size = 0;

deps/v8/src/stub-cache.cc (78 lines changed, vendored)

@@ -498,13 +498,13 @@ MaybeObject* StubCache::ComputeStoreField(String* name,
                                          JSObject* receiver,
                                          int field_index,
                                          Map* transition,
-                                          Code::ExtraICState extra_ic_state) {
+                                          StrictModeFlag strict_mode) {
  PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
  Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::STORE_IC, type, extra_ic_state);
+      Code::STORE_IC, type, strict_mode);
  Object* code = receiver->map()->FindInCodeCache(name, flags);
  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(extra_ic_state);
+    StoreStubCompiler compiler(strict_mode);
    { MaybeObject* maybe_code =
          compiler.CompileStoreField(receiver, field_index, transition, name);
      if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -521,13 +521,15 @@ MaybeObject* StubCache::ComputeStoreField(String* name,
}


-MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) {
+MaybeObject* StubCache::ComputeKeyedStoreSpecialized(
+    JSObject* receiver,
+    StrictModeFlag strict_mode) {
  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL);
+      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
  String* name = Heap::KeyedStoreSpecialized_symbol();
  Object* code = receiver->map()->FindInCodeCache(name, flags);
  if (code->IsUndefined()) {
-    KeyedStoreStubCompiler compiler;
+    KeyedStoreStubCompiler compiler(strict_mode);
    { MaybeObject* maybe_code = compiler.CompileStoreSpecialized(receiver);
      if (!maybe_code->ToObject(&code)) return maybe_code;
    }
@@ -542,7 +544,9 @@ MaybeObject* StubCache::ComputeKeyedStoreSpecialized(JSObject* receiver) {
}


-MaybeObject* StubCache::ComputeKeyedStorePixelArray(JSObject* receiver) {
+MaybeObject* StubCache::ComputeKeyedStorePixelArray(
+    JSObject* receiver,
+    StrictModeFlag strict_mode) {
  // Using NORMAL as the PropertyType for array element stores is a misuse. The
  // generated stub always accesses fast elements, not slow-mode fields, but
  // some property type is required for the stub lookup. Note that overloading
@@ -550,11 +554,11 @@ MaybeObject* StubCache::ComputeKeyedStorePixelArray(JSObject* receiver) {
  // other keyed field stores. This is guaranteed to be the case since all field
  // keyed stores that are not array elements go through a generic builtin stub.
  Code::Flags flags =
-      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL);
+      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, NORMAL, strict_mode);
  String* name = Heap::KeyedStorePixelArray_symbol();
  Object* code = receiver->map()->FindInCodeCache(name, flags);
  if (code->IsUndefined()) {
-    KeyedStoreStubCompiler compiler;
+    KeyedStoreStubCompiler compiler(strict_mode);
    { MaybeObject* maybe_code = compiler.CompileStorePixelArray(receiver);
      if (!maybe_code->ToObject(&code)) return maybe_code;
    }
@@ -598,11 +602,13 @@ ExternalArrayType ElementsKindToExternalArrayType(JSObject::ElementsKind kind) {

MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
    JSObject* receiver,
-    bool is_store) {
+    bool is_store,
+    StrictModeFlag strict_mode) {
  Code::Flags flags =
      Code::ComputeMonomorphicFlags(
          is_store ? Code::KEYED_STORE_IC : Code::KEYED_LOAD_IC,
-          NORMAL);
+          NORMAL,
+          strict_mode);
  ExternalArrayType array_type =
      ElementsKindToExternalArrayType(receiver->GetElementsKind());
  String* name =
@@ -615,9 +621,9 @@ MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
  Object* code = map->FindInCodeCache(name, flags);
  if (code->IsUndefined()) {
    ExternalArrayStubCompiler compiler;
-    { MaybeObject* maybe_code =
-          is_store ? compiler.CompileKeyedStoreStub(array_type, flags) :
-          compiler.CompileKeyedLoadStub(array_type, flags);
+    { MaybeObject* maybe_code = is_store
+          ? compiler.CompileKeyedStoreStub(array_type, flags)
+          : compiler.CompileKeyedLoadStub(array_type, flags);
      if (!maybe_code->ToObject(&code)) return maybe_code;
    }
    if (is_store) {
@@ -637,8 +643,8 @@ MaybeObject* StubCache::ComputeKeyedLoadOrStoreExternalArray(
}


-MaybeObject* StubCache::ComputeStoreNormal(Code::ExtraICState extra_ic_state) {
-  return Builtins::builtin(extra_ic_state == StoreIC::kStoreICStrict
+MaybeObject* StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) {
+  return Builtins::builtin((strict_mode == kStrictMode)
      ? Builtins::StoreIC_Normal_Strict
      : Builtins::StoreIC_Normal);
}
@@ -647,12 +653,12 @@ MaybeObject* StubCache::ComputeStoreNormal(Code::ExtraICState extra_ic_state) {
MaybeObject* StubCache::ComputeStoreGlobal(String* name,
                                           GlobalObject* receiver,
                                           JSGlobalPropertyCell* cell,
-                                           Code::ExtraICState extra_ic_state) {
+                                           StrictModeFlag strict_mode) {
  Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::STORE_IC, NORMAL, extra_ic_state);
+      Code::STORE_IC, NORMAL, strict_mode);
  Object* code = receiver->map()->FindInCodeCache(name, flags);
  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(extra_ic_state);
+    StoreStubCompiler compiler(strict_mode);
    { MaybeObject* maybe_code =
          compiler.CompileStoreGlobal(receiver, cell, name);
      if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -673,13 +679,13 @@ MaybeObject* StubCache::ComputeStoreCallback(
    String* name,
    JSObject* receiver,
    AccessorInfo* callback,
-    Code::ExtraICState extra_ic_state) {
+    StrictModeFlag strict_mode) {
  ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
  Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::STORE_IC, CALLBACKS, extra_ic_state);
+      Code::STORE_IC, CALLBACKS, strict_mode);
  Object* code = receiver->map()->FindInCodeCache(name, flags);
  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(extra_ic_state);
+    StoreStubCompiler compiler(strict_mode);
    { MaybeObject* maybe_code =
          compiler.CompileStoreCallback(receiver, callback, name);
      if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -699,12 +705,12 @@ MaybeObject* StubCache::ComputeStoreCallback(
MaybeObject* StubCache::ComputeStoreInterceptor(
    String* name,
    JSObject* receiver,
-    Code::ExtraICState extra_ic_state) {
+    StrictModeFlag strict_mode) {
  Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::STORE_IC, INTERCEPTOR, extra_ic_state);
+      Code::STORE_IC, INTERCEPTOR, strict_mode);
  Object* code = receiver->map()->FindInCodeCache(name, flags);
  if (code->IsUndefined()) {
-    StoreStubCompiler compiler(extra_ic_state);
+    StoreStubCompiler compiler(strict_mode);
    { MaybeObject* maybe_code =
          compiler.CompileStoreInterceptor(receiver, name);
      if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -724,12 +730,14 @@ MaybeObject* StubCache::ComputeStoreInterceptor(
MaybeObject* StubCache::ComputeKeyedStoreField(String* name,
                                               JSObject* receiver,
                                               int field_index,
-                                               Map* transition) {
+                                               Map* transition,
+                                               StrictModeFlag strict_mode) {
  PropertyType type = (transition == NULL) ? FIELD : MAP_TRANSITION;
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(
+      Code::KEYED_STORE_IC, type, strict_mode);
  Object* code = receiver->map()->FindInCodeCache(name, flags);
  if (code->IsUndefined()) {
-    KeyedStoreStubCompiler compiler;
+    KeyedStoreStubCompiler compiler(strict_mode);
    { MaybeObject* maybe_code =
          compiler.CompileStoreField(receiver, field_index, transition, name);
      if (!maybe_code->ToObject(&code)) return maybe_code;
@@ -1417,12 +1425,17 @@ MaybeObject* LoadPropertyWithInterceptorForCall(Arguments args) {


MaybeObject* StoreInterceptorProperty(Arguments args) {
+  ASSERT(args.length() == 4);
  JSObject* recv = JSObject::cast(args[0]);
  String* name = String::cast(args[1]);
  Object* value = args[2];
+  StrictModeFlag strict =
+      static_cast<StrictModeFlag>(Smi::cast(args[3])->value());
+  ASSERT(strict == kStrictMode || strict == kNonStrictMode);
  ASSERT(recv->HasNamedInterceptor());
  PropertyAttributes attr = NONE;
-  MaybeObject* result = recv->SetPropertyWithInterceptor(name, value, attr);
+  MaybeObject* result = recv->SetPropertyWithInterceptor(
+      name, value, attr, strict);
  return result;
}

@@ -1675,8 +1688,8 @@ MaybeObject* KeyedLoadStubCompiler::GetCode(PropertyType type, String* name) {


MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC, type,
-                                                    extra_ic_state_);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(
+      Code::STORE_IC, type, strict_mode_);
  MaybeObject* result = GetCodeWithFlags(flags, name);
  if (!result->IsFailure()) {
    PROFILE(CodeCreateEvent(Logger::STORE_IC_TAG,
@@ -1691,7 +1704,8 @@ MaybeObject* StoreStubCompiler::GetCode(PropertyType type, String* name) {


MaybeObject* KeyedStoreStubCompiler::GetCode(PropertyType type, String* name) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, type);
+  Code::Flags flags = Code::ComputeMonomorphicFlags(
+      Code::KEYED_STORE_IC, type, strict_mode_);
  MaybeObject* result = GetCodeWithFlags(flags, name);
  if (!result->IsFailure()) {
    PROFILE(CodeCreateEvent(Logger::KEYED_STORE_IC_TAG,

deps/v8/src/stub-cache.h (33 lines changed, vendored)

@@ -143,27 +143,27 @@ class StubCache : public AllStatic {
                                          JSObject* receiver,
                                          int field_index,
                                          Map* transition,
-                                          Code::ExtraICState extra_ic_state);
+                                          StrictModeFlag strict_mode);

  MUST_USE_RESULT static MaybeObject* ComputeStoreNormal(
-      Code::ExtraICState extra_ic_state);
+      StrictModeFlag strict_mode);

  MUST_USE_RESULT static MaybeObject* ComputeStoreGlobal(
      String* name,
      GlobalObject* receiver,
      JSGlobalPropertyCell* cell,
-      Code::ExtraICState extra_ic_state);
+      StrictModeFlag strict_mode);

  MUST_USE_RESULT static MaybeObject* ComputeStoreCallback(
      String* name,
      JSObject* receiver,
      AccessorInfo* callback,
-      Code::ExtraICState extra_ic_state);
+      StrictModeFlag strict_mode);

  MUST_USE_RESULT static MaybeObject* ComputeStoreInterceptor(
      String* name,
      JSObject* receiver,
-      Code::ExtraICState extra_ic_state);
+      StrictModeFlag strict_mode);

  // ---

@@ -171,17 +171,21 @@ class StubCache : public AllStatic {
      String* name,
      JSObject* receiver,
      int field_index,
-      Map* transition = NULL);
+      Map* transition,
+      StrictModeFlag strict_mode);

  MUST_USE_RESULT static MaybeObject* ComputeKeyedStoreSpecialized(
-      JSObject* receiver);
+      JSObject* receiver,
+      StrictModeFlag strict_mode);

  MUST_USE_RESULT static MaybeObject* ComputeKeyedStorePixelArray(
-      JSObject* receiver);
+      JSObject* receiver,
+      StrictModeFlag strict_mode);

  MUST_USE_RESULT static MaybeObject* ComputeKeyedLoadOrStoreExternalArray(
      JSObject* receiver,
-      bool is_store);
+      bool is_store,
+      StrictModeFlag strict_mode);

  // ---

@@ -628,8 +632,8 @@ class KeyedLoadStubCompiler: public StubCompiler {

class StoreStubCompiler: public StubCompiler {
 public:
-  explicit StoreStubCompiler(Code::ExtraICState extra_ic_state)
-      : extra_ic_state_(extra_ic_state) { }
+  explicit StoreStubCompiler(StrictModeFlag strict_mode)
+      : strict_mode_(strict_mode) { }

  MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
                                                 int index,
@@ -649,12 +653,15 @@ class StoreStubCompiler: public StubCompiler {
 private:
  MaybeObject* GetCode(PropertyType type, String* name);

-  Code::ExtraICState extra_ic_state_;
+  StrictModeFlag strict_mode_;
};


class KeyedStoreStubCompiler: public StubCompiler {
 public:
+  explicit KeyedStoreStubCompiler(StrictModeFlag strict_mode)
+      : strict_mode_(strict_mode) { }

  MUST_USE_RESULT MaybeObject* CompileStoreField(JSObject* object,
                                                 int index,
                                                 Map* transition,
@@ -666,6 +673,8 @@ class KeyedStoreStubCompiler: public StubCompiler {

 private:
  MaybeObject* GetCode(PropertyType type, String* name);

+  StrictModeFlag strict_mode_;
};


deps/v8/src/version.cc (2 lines changed, vendored)

@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION     3
#define MINOR_VERSION     1
-#define BUILD_NUMBER      6
+#define BUILD_NUMBER      8
#define PATCH_LEVEL       0
#define CANDIDATE_VERSION false

deps/v8/src/virtual-frame-heavy-inl.h (6 lines changed, vendored)

@@ -82,10 +82,8 @@ void VirtualFrame::Push(Register reg, TypeInfo info) {
}


-void VirtualFrame::Push(Handle<Object> value) {
-  FrameElement element =
-      FrameElement::ConstantElement(value, FrameElement::NOT_SYNCED);
-  elements_.Add(element);
+bool VirtualFrame::ConstantPoolOverflowed() {
+  return FrameElement::ConstantPoolOverflowed();
}

deps/v8/src/x64/assembler-x64-inl.h (2 lines changed, vendored)

@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

deps/v8/src/x64/assembler-x64.cc (24 lines changed, vendored)

@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -2995,6 +2995,28 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) {
}


+void Assembler::andpd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x54);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::orpd(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  last_pc_ = pc_;
+  emit(0x66);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x56);
+  emit_sse_operand(dst, src);
+}
+
+
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  EnsureSpace ensure_space(this);
  last_pc_ = pc_;

deps/v8/src/x64/assembler-x64.h (4 lines changed, vendored)

@@ -30,7 +30,7 @@

// The original source code covered by the above license above has been
// modified significantly by Google Inc.
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.

// A lightweight X64 Assembler.

@@ -1284,6 +1284,8 @@ class Assembler : public Malloced {
  void mulsd(XMMRegister dst, XMMRegister src);
  void divsd(XMMRegister dst, XMMRegister src);

+  void andpd(XMMRegister dst, XMMRegister src);
+  void orpd(XMMRegister dst, XMMRegister src);
  void xorpd(XMMRegister dst, XMMRegister src);
  void sqrtsd(XMMRegister dst, XMMRegister src);

deps/v8/src/x64/builtins-x64.cc (2 lines changed, vendored)

@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

deps/v8/src/x64/code-stubs-x64.cc (321 lines changed, vendored)

@@ -1506,12 +1525,25 @@ void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {


void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
-  // Input on stack:
+  // TAGGED case:
+  //   Input:
  //     rsp[8]: argument (should be number).
  //     rsp[0]: return address.
+  //   Output:
+  //     rax: tagged double result.
+  // UNTAGGED case:
+  //   Input::
+  //     rsp[0]: return address.
+  //     xmm1: untagged double input argument
+  //   Output:
+  //     xmm1: untagged double result.

  Label runtime_call;
  Label runtime_call_clear_stack;
-  Label input_not_smi;
+  Label skip_cache;
+  const bool tagged = (argument_type_ == TAGGED);
+  if (tagged) {
+    NearLabel input_not_smi;
+    NearLabel loaded;
    // Test that rax is a number.
    __ movq(rax, Operand(rsp, kPointerSize));
@@ -1519,18 +1532,18 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
    // Input is a smi. Untag and load it onto the FPU stack.
    // Then load the bits of the double into rbx.
    __ SmiToInteger32(rax, rax);
-    __ subq(rsp, Immediate(kPointerSize));
+    __ subq(rsp, Immediate(kDoubleSize));
    __ cvtlsi2sd(xmm1, rax);
    __ movsd(Operand(rsp, 0), xmm1);
    __ movq(rbx, xmm1);
+    __ movq(rdx, xmm1);
    __ fld_d(Operand(rsp, 0));
-    __ addq(rsp, Immediate(kPointerSize));
+    __ addq(rsp, Immediate(kDoubleSize));
    __ jmp(&loaded);

    __ bind(&input_not_smi);
    // Check if input is a HeapNumber.
-    __ Move(rbx, Factory::heap_number_map());
+    __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
    __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
    __ j(not_equal, &runtime_call);
    // Input is a HeapNumber. Push it on the FPU stack and load its
@@ -1538,8 +1551,14 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
    __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ movq(rdx, rbx);

    __ bind(&loaded);
-  // ST[0] == double value
+  } else {  // UNTAGGED.
+    __ movq(rbx, xmm1);
+    __ movq(rdx, xmm1);
+  }

+  // ST[0] == double value, if TAGGED.
  // rbx = bits of double value.
  // rdx = also bits of double value.
  // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
@@ -1571,7 +1590,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // rax points to the cache for the type type_.
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
  __ testq(rax, rax);
-  __ j(zero, &runtime_call_clear_stack);
+  __ j(zero, &runtime_call_clear_stack);  // Only clears stack if TAGGED.
#ifdef DEBUG
  // Check that the layout of cache elements match expectations.
  { // NOLINT - doesn't like a single brace on a line.
@@ -1597,30 +1616,70 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  __ j(not_equal, &cache_miss);
  // Cache hit!
  __ movq(rax, Operand(rcx, 2 * kIntSize));
+  if (tagged) {
+    __ fstp(0);  // Clear FPU stack.
+    __ ret(kPointerSize);
+  } else {  // UNTAGGED.
+    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ Ret();
+  }

  __ bind(&cache_miss);
  // Update cache with new value.
-  Label nan_result;
-  GenerateOperation(masm, &nan_result);
+  if (tagged) {
    __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
+  } else {  // UNTAGGED.
+    __ AllocateHeapNumber(rax, rdi, &skip_cache);
+    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
+    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
+  }
+  GenerateOperation(masm);
  __ movq(Operand(rcx, 0), rbx);
  __ movq(Operand(rcx, 2 * kIntSize), rax);
  __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
+  if (tagged) {
    __ ret(kPointerSize);
+  } else {  // UNTAGGED.
+    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ Ret();

+    // Skip cache and return answer directly, only in untagged case.
+    __ bind(&skip_cache);
+    __ subq(rsp, Immediate(kDoubleSize));
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ fld_d(Operand(rsp, 0));
+    GenerateOperation(masm);
+    __ fstp_d(Operand(rsp, 0));
+    __ movsd(xmm1, Operand(rsp, 0));
+    __ addq(rsp, Immediate(kDoubleSize));
+    // We return the value in xmm1 without adding it to the cache, but
+    // we cause a scavenging GC so that future allocations will succeed.
+    __ EnterInternalFrame();
+    // Allocate an unused object bigger than a HeapNumber.
+    __ Push(Smi::FromInt(2 * kDoubleSize));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
+    __ LeaveInternalFrame();
+    __ Ret();
+  }

  // Call runtime, doing whatever allocation and cleanup is necessary.
+  if (tagged) {
    __ bind(&runtime_call_clear_stack);
    __ fstp(0);
    __ bind(&runtime_call);
    __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);

-  __ bind(&nan_result);
-  __ fstp(0);  // Remove argument from FPU stack.
-  __ LoadRoot(rax, Heap::kNanValueRootIndex);
-  __ movq(Operand(rcx, 0), rbx);
-  __ movq(Operand(rcx, 2 * kIntSize), rax);
-  __ ret(kPointerSize);
+  } else {  // UNTAGGED.
+    __ bind(&runtime_call_clear_stack);
+    __ bind(&runtime_call);
+    __ AllocateHeapNumber(rax, rdi, &skip_cache);
+    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
+    __ EnterInternalFrame();
+    __ push(rax);
+    __ CallRuntime(RuntimeFunction(), 1);
+    __ LeaveInternalFrame();
+    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
+    __ Ret();
+  }
}


@@ -1637,9 +1696,9 @@ Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
}


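The cache lookup in Generate above hinges on hashing the raw 64-bit pattern of the input double down to a 32-bit index, as the "Compute hash" comment notes; cache lines then pair the input bits with the computed result. A plausible C++ rendering of such a fold follows; the exact shift/xor sequence V8 uses differs in detail, so treat this as an illustrative assumption:

    #include <stdint.h>
    #include <string.h>

    // Fold a double's 64 bits into an index for a power-of-two-sized cache.
    static int TranscendentalCacheIndex(double input, int cache_size) {
      uint64_t bits;
      memcpy(&bits, &input, sizeof(bits));  // reinterpret the bits, no conversion
      uint32_t h = static_cast<uint32_t>(bits) ^
                   static_cast<uint32_t>(bits >> 32);
      h ^= h >> 16;                         // spread high bits downward
      return static_cast<int>(h & (cache_size - 1));
    }

Hashing the bit pattern rather than the numeric value means +0.0 and -0.0 get distinct entries and NaNs are handled uniformly, which suits a memoization cache for transcendental functions.
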
-void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm,
-                                                Label* on_nan_result) {
+void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
  // Registers:
  // rax: Newly allocated HeapNumber, which must be preserved.
  // rbx: Bits of input double. Must be preserved.
  // rcx: Pointer to cache entry. Must be preserved.
  // st(0): Input double
@@ -1661,9 +1720,18 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
  __ j(below, &in_range);
  // Check for infinity and NaN. Both return NaN for sin.
  __ cmpl(rdi, Immediate(0x7ff));
-  __ j(equal, on_nan_result);
+  NearLabel non_nan_result;
+  __ j(not_equal, &non_nan_result);
+  // Input is +/-Infinity or NaN. Result is NaN.
+  __ fstp(0);
+  __ LoadRoot(kScratchRegister, Heap::kNanValueRootIndex);
+  __ fld_d(FieldOperand(kScratchRegister, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&non_nan_result);

  // Use fpmod to restrict argument to the range +/-2*PI.
+  __ movq(rdi, rax);  // Save rax before using fnstsw_ax.
  __ fldpi();
  __ fadd(0);
  __ fld(1);
@@ -1696,6 +1764,7 @@ void TranscendentalCacheStub::GenerateOperation(MacroAssembler* masm) {
  // FPU Stack: input % 2*pi, 2*pi,
  __ fstp(0);
  // FPU Stack: input % 2*pi
+  __ movq(rax, rdi);  // Restore rax, pointer to the new HeapNumber.
  __ bind(&in_range);
  switch (type_) {
    case TranscendentalCache::SIN:
@ -1948,8 +2017,8 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
|
||||
__ AbortIfSmi(rax);
|
||||
}
|
||||
|
||||
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
|
||||
__ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
|
||||
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
|
||||
Heap::kHeapNumberMapRootIndex);
|
||||
__ j(not_equal, &slow);
|
||||
// Operand is a float, negate its value by flipping sign bit.
|
||||
__ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
|
||||
@ -1978,8 +2047,8 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
// Check if the operand is a heap number.
|
||||
__ movq(rdx, FieldOperand(rax, HeapObject::kMapOffset));
|
||||
__ CompareRoot(rdx, Heap::kHeapNumberMapRootIndex);
|
||||
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
|
||||
Heap::kHeapNumberMapRootIndex);
|
||||
__ j(not_equal, &slow);
|
||||
|
||||
// Convert the heap number in rax to an untagged integer in rcx.
|
||||
@ -2012,6 +2081,157 @@ void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
|
||||
}
|
||||
|
||||
|
||||
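The new inline NaN handling and the fpmod-based range reduction have roughly this C++ shape. A sketch only, assuming sin is the selected operation; 0x7ff is the biased exponent of Inf/NaN in IEEE 754 doubles.

#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

const double kTwoPi = 6.283185307179586476925286766559;

double SinWithChecks(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof bits);
  uint32_t biased_exponent = static_cast<uint32_t>(bits >> 52) & 0x7ff;
  if (biased_exponent == 0x7ff) {
    // Input is +/-Infinity or NaN: result is NaN (handled inline now,
    // no more on_nan_result bailout label).
    return std::numeric_limits<double>::quiet_NaN();
  }
  // Large arguments are reduced into +/-2*pi first (fprem in the stub).
  double reduced = std::fmod(input, kTwoPi);
  return std::sin(reduced);
}
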
void MathPowStub::Generate(MacroAssembler* masm) {
// Registers are used as follows:
// rdx = base
// rax = exponent
// rcx = temporary, result

Label allocate_return, call_runtime;

// Load input parameters.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
__ movq(rax, Operand(rsp, 1 * kPointerSize));

// Save 1 in xmm3 - we need this several times later on.
__ movl(rcx, Immediate(1));
__ cvtlsi2sd(xmm3, rcx);

Label exponent_nonsmi;
Label base_nonsmi;
// If the exponent is a heap number go to that specific case.
__ JumpIfNotSmi(rax, &exponent_nonsmi);
__ JumpIfNotSmi(rdx, &base_nonsmi);

// Optimized version when both exponent and base are smis.
Label powi;
__ SmiToInteger32(rdx, rdx);
__ cvtlsi2sd(xmm0, rdx);
__ jmp(&powi);
// Exponent is a smi and base is a heapnumber.
__ bind(&base_nonsmi);
__ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);

__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));

// Optimized version of pow if exponent is a smi.
// xmm0 contains the base.
__ bind(&powi);
__ SmiToInteger32(rax, rax);

// Save exponent in base as we need to check if exponent is negative later.
// We know that base and exponent are in different registers.
__ movq(rdx, rax);

// Get absolute value of exponent.
NearLabel no_neg;
__ cmpl(rax, Immediate(0));
__ j(greater_equal, &no_neg);
__ negl(rax);
__ bind(&no_neg);

// Load xmm1 with 1.
__ movsd(xmm1, xmm3);
NearLabel while_true;
NearLabel no_multiply;

__ bind(&while_true);
__ shrl(rax, Immediate(1));
__ j(not_carry, &no_multiply);
__ mulsd(xmm1, xmm0);
__ bind(&no_multiply);
__ mulsd(xmm0, xmm0);
__ j(not_zero, &while_true);

// Base has the original value of the exponent - if the exponent is
// negative return 1/result.
__ testl(rdx, rdx);
__ j(positive, &allocate_return);
// Special case if xmm1 has reached infinity.
__ divsd(xmm3, xmm1);
__ movsd(xmm1, xmm3);
__ xorpd(xmm0, xmm0);
__ ucomisd(xmm0, xmm1);
__ j(equal, &call_runtime);

__ jmp(&allocate_return);

// Exponent (or both) is a heapnumber - no matter what we should now work
// on doubles.
__ bind(&exponent_nonsmi);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
__ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
// Test if exponent is nan.
__ ucomisd(xmm1, xmm1);
__ j(parity_even, &call_runtime);

NearLabel base_not_smi;
NearLabel handle_special_cases;
__ JumpIfNotSmi(rdx, &base_not_smi);
__ SmiToInteger32(rdx, rdx);
__ cvtlsi2sd(xmm0, rdx);
__ jmp(&handle_special_cases);

__ bind(&base_not_smi);
__ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &call_runtime);
__ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
__ andl(rcx, Immediate(HeapNumber::kExponentMask));
__ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
// base is NaN or +/-Infinity
__ j(greater_equal, &call_runtime);
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));

// base is in xmm0 and exponent is in xmm1.
__ bind(&handle_special_cases);
NearLabel not_minus_half;
// Test for -0.5.
// Load xmm2 with -0.5.
__ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
__ movq(xmm2, rcx);
// xmm2 now has -0.5.
__ ucomisd(xmm2, xmm1);
__ j(not_equal, &not_minus_half);

// Calculates reciprocal of square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorpd(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);
__ divsd(xmm3, xmm1);
__ movsd(xmm1, xmm3);
__ jmp(&allocate_return);

// Test for 0.5.
__ bind(&not_minus_half);
// Load xmm2 with 0.5.
// Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
__ addsd(xmm2, xmm3);
// xmm2 now has 0.5.
__ ucomisd(xmm2, xmm1);
__ j(not_equal, &call_runtime);
// Calculates square root.
// sqrtsd returns -0 when input is -0. ECMA spec requires +0.
__ xorpd(xmm1, xmm1);
__ addsd(xmm1, xmm0);
__ sqrtsd(xmm1, xmm1);

__ bind(&allocate_return);
__ AllocateHeapNumber(rcx, rax, &call_runtime);
__ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
__ movq(rax, rcx);
__ ret(2 * kPointerSize);

__ bind(&call_runtime);
__ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
}

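In C++ terms, the stub's two fast paths look roughly like this. A sketch, not the stub's exact semantics: overflow of the squaring loop still falls back to the runtime in the generated code.

#include <cmath>

// Smi-exponent path: square-and-multiply on |exponent|, then take the
// reciprocal for negative exponents, matching the shape of the loop above.
double PowSmallInt(double base, int exponent) {
  int n = exponent < 0 ? -exponent : exponent;
  double result = 1.0;
  double x = base;
  while (n != 0) {
    if (n & 1) result *= x;  // low bit set: multiply into the result
    x *= x;                  // square on every iteration
    n >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;
}

// Heap-number exponent path: +/-0.5 are special-cased so that
// pow(x, 0.5) == sqrt(x) and pow(x, -0.5) == 1 / sqrt(x). Adding +0
// first turns a -0 base into +0, which is why the stub xors xmm1 and
// then adds the base before sqrtsd.
double PowHalf(double base, double exponent) {
  double root = std::sqrt(0.0 + base);
  return exponent < 0.0 ? 1.0 / root : root;
}
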
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
// The key is in rdx and the parameter count is in rax.

@@ -4613,6 +4833,61 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
__ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void StringCharAtStub::Generate(MacroAssembler* masm) {
// Expects two arguments (object, index) on the stack:

// Stack frame on entry.
// rsp[0]: return address
// rsp[8]: index
// rsp[16]: object

Register object = rbx;
Register index = rax;
Register scratch1 = rcx;
Register scratch2 = rdx;
Register result = rax;

__ pop(scratch1);  // Return address.
__ pop(index);
__ pop(object);
__ push(scratch1);

Label need_conversion;
Label index_out_of_range;
Label done;
StringCharAtGenerator generator(object,
index,
scratch1,
scratch2,
result,
&need_conversion,
&need_conversion,
&index_out_of_range,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm);
__ jmp(&done);

__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
__ Move(result, Factory::empty_string());
__ jmp(&done);

__ bind(&need_conversion);
// Move smi zero into the result register, which will trigger
// conversion.
__ Move(result, Smi::FromInt(0));
__ jmp(&done);

StubRuntimeCallHelper call_helper;
generator.GenerateSlow(masm, call_helper);

__ bind(&done);
__ ret(0);
}

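The stub's contract, restated as a hypothetical C++ helper (names are illustrative, not V8 API): a fast path for in-range numeric indices and the empty string when the index is out of range.

#include <string>

std::string CharAt(const std::string& subject, double index) {
  long i = static_cast<long>(index);
  if (i < 0 || static_cast<size_t>(i) >= subject.size()) {
    return std::string();  // out of range: the spec requires ""
  }
  return std::string(1, subject[i]);
}
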
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
ASSERT(state_ == CompareIC::SMIS);
NearLabel miss;

deps/v8/src/x64/code-stubs-x64.h (vendored)
@@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -39,15 +39,23 @@ namespace internal {
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
explicit TranscendentalCacheStub(TranscendentalCache::Type type)
: type_(type) {}
enum ArgumentType {
TAGGED = 0,
UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
};

explicit TranscendentalCacheStub(TranscendentalCache::Type type,
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) {}
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
ArgumentType argument_type_;

Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; }
int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
void GenerateOperation(MacroAssembler* masm, Label* on_nan_result);
void GenerateOperation(MacroAssembler* masm);
};

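The new MinorKey works because the two fields occupy disjoint bit ranges; a minimal sketch (the width chosen for kTranscendentalTypeBits is an assumption here):

enum Type { SIN = 0, COS = 1, LOG = 2 };
const int kTranscendentalTypeBits = 3;  // assumed wide enough for every Type

enum ArgumentType {
  TAGGED = 0,
  UNTAGGED = 1 << kTranscendentalTypeBits  // sits just above the type bits
};

// Disjoint bit ranges make OR lossless, so one integer encodes both.
int MinorKey(Type type, ArgumentType argument_type) {
  return type | argument_type;
}
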
deps/v8/src/x64/codegen-x64-inl.h (vendored)
@@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
deps/v8/src/x64/codegen-x64.cc (vendored)
@@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -2747,7 +2747,8 @@ void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
frame_->EmitPush(rsi);  // The context is the first argument.
frame_->EmitPush(kScratchRegister);
frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
frame_->EmitPush(Smi::FromInt(strict_mode_flag()));
Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored.
}

@@ -4605,7 +4606,8 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
// by initialization.
value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
frame_->Push(Smi::FromInt(strict_mode_flag()));
value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4);
}
// Storing a variable must keep the (new) value on the expression
// stack. This is necessary for compiling chained assignment
@@ -4914,8 +4916,9 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Load(property->key());
Load(property->value());
if (property->emit_store()) {
frame_->Push(Smi::FromInt(NONE));  // PropertyAttributes
// Ignore the result.
Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 3);
Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4);
} else {
frame_->Drop(3);
}
@@ -7030,7 +7033,8 @@ void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
TranscendentalCacheStub stub(TranscendentalCache::SIN);
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result);
}
@@ -7039,7 +7043,8 @@ void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
TranscendentalCacheStub stub(TranscendentalCache::COS);
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result);
}
@@ -7048,7 +7053,8 @@ void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) {
ASSERT_EQ(args->length(), 1);
Load(args->at(0));
TranscendentalCacheStub stub(TranscendentalCache::LOG);
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
Result result = frame_->CallStub(&stub, 1);
frame_->Push(&result);
}
@@ -8072,8 +8078,12 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
public:
DeferredReferenceSetKeyedValue(Register value,
Register key,
Register receiver)
: value_(value), key_(key), receiver_(receiver) {
Register receiver,
StrictModeFlag strict_mode)
: value_(value),
key_(key),
receiver_(receiver),
strict_mode_(strict_mode) {
set_comment("[ DeferredReferenceSetKeyedValue");
}

@@ -8086,6 +8096,7 @@ class DeferredReferenceSetKeyedValue: public DeferredCode {
Register key_;
Register receiver_;
Label patch_site_;
StrictModeFlag strict_mode_;
};

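The pattern repeated throughout this change is a two-way stub selection on a strict-mode flag captured at construction time; sketched:

enum StrictModeFlag { kNonStrictMode, kStrictMode };
enum BuiltinId { KeyedStoreIC_Initialize, KeyedStoreIC_Initialize_Strict };

// Pick the strict or non-strict keyed-store IC from the flag captured
// when the deferred code object was created.
BuiltinId ChooseKeyedStoreIC(StrictModeFlag strict_mode) {
  return strict_mode == kStrictMode ? KeyedStoreIC_Initialize_Strict
                                    : KeyedStoreIC_Initialize;
}
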
@@ -8137,7 +8148,9 @@ void DeferredReferenceSetKeyedValue::Generate() {
}

// Call the IC stub.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
(strict_mode_ == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// The delta from the start of the map-compare instructions (initial movq)
// to the test instruction. We use masm_-> directly here instead of the
@@ -8478,7 +8491,8 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
DeferredReferenceSetKeyedValue* deferred =
new DeferredReferenceSetKeyedValue(result.reg(),
key.reg(),
receiver.reg());
receiver.reg(),
strict_mode_flag());

// Check that the receiver is not a smi.
__ JumpIfSmi(receiver.reg(), deferred->entry_label());
@@ -8540,7 +8554,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {

deferred->BindExit();
} else {
result = frame()->CallKeyedStoreIC();
result = frame()->CallKeyedStoreIC(strict_mode_flag());
// Make sure that we do not have a test instruction after the
// call. A test instruction after the call is used to
// indicate that we have generated an inline version of the
deps/v8/src/x64/codegen-x64.h (vendored)
@@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

deps/v8/src/x64/cpu-x64.cc (vendored)
@@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

deps/v8/src/x64/debug-x64.cc (vendored)
@@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
deps/v8/src/x64/deoptimizer-x64.cc (vendored)
@@ -224,7 +224,7 @@ void Deoptimizer::PatchStackCheckCodeAt(Address pc_after,
// ok:
//
ASSERT(*(call_target_address - 3) == 0x73 &&  // jae
*(call_target_address - 2) == 0x05 &&  // offset
*(call_target_address - 2) == 0x07 &&  // offset
*(call_target_address - 1) == 0xe8);  // call
*(call_target_address - 3) = 0x90;  // nop
*(call_target_address - 2) = 0x90;  // nop
@@ -245,14 +245,154 @@ void Deoptimizer::RevertStackCheckCodeAt(Address pc_after,
*(call_target_address - 2) == 0x90 &&  // nop
*(call_target_address - 1) == 0xe8);  // call
*(call_target_address - 3) = 0x73;  // jae
*(call_target_address - 2) = 0x05;  // offset
*(call_target_address - 2) = 0x07;  // offset
Assembler::set_target_address_at(call_target_address,
check_code->entry());
}

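The byte-level idea behind the patch (and the 0x05 to 0x07 offset fix above) in a hedged sketch: the stack-check site ends with "jae <offset>; call <stub>", and patching overwrites the two jump bytes with nops so the call is always taken.

#include <cstdint>

void PatchStackCheck(uint8_t* call_target_address) {
  // Before: 0x73 0x07 0xe8 ...   (jae +7; call <stack check>)
  call_target_address[-3] = 0x90;  // nop, was 0x73 (jae)
  call_target_address[-2] = 0x90;  // nop, was 0x07 (jump offset)
  // call_target_address[-1] stays 0xe8 (the call opcode).
}

void RevertStackCheck(uint8_t* call_target_address) {
  call_target_address[-3] = 0x73;  // restore jae
  call_target_address[-2] = 0x07;  // restore the jump offset
}
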
static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
for (int i = 0; i < length; i++) {
if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
TranslationIterator it(translations, data->TranslationIndex(i)->value());
int value = it.Next();
ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
// Read the number of frames.
value = it.Next();
if (value == 1) return i;
}
}
UNREACHABLE();
return -1;
}


void Deoptimizer::DoComputeOsrOutputFrame() {
UNIMPLEMENTED();
DeoptimizationInputData* data = DeoptimizationInputData::cast(
optimized_code_->deoptimization_data());
unsigned ast_id = data->OsrAstId()->value();
// TODO(kasperl): This should not be the bailout_id_. It should be
// the ast id. Confusing.
ASSERT(bailout_id_ == ast_id);

int bailout_id = LookupBailoutId(data, ast_id);
unsigned translation_index = data->TranslationIndex(bailout_id)->value();
ByteArray* translations = data->TranslationByteArray();

TranslationIterator iterator(translations, translation_index);
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator.Next());
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
ASSERT(count == 1);
USE(count);

opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
ASSERT(Translation::FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
USE(function);
ASSERT(function == function_);
unsigned height = iterator.Next();
unsigned height_in_bytes = height * kPointerSize;
USE(height_in_bytes);

unsigned fixed_size = ComputeFixedSize(function_);
unsigned input_frame_size = static_cast<unsigned>(input_->GetFrameSize());
ASSERT(fixed_size + height_in_bytes == input_frame_size);

unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
unsigned outgoing_size = outgoing_height * kPointerSize;
unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.

if (FLAG_trace_osr) {
PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
reinterpret_cast<intptr_t>(function_));
function_->PrintName();
PrintF(" => node=%u, frame=%d->%d]\n",
ast_id,
input_frame_size,
output_frame_size);
}

// There's only one output frame in the OSR case.
output_count_ = 1;
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);

// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
unsigned output_offset = output_frame_size - kPointerSize;
int parameter_count = function_->shared()->formal_parameter_count() + 1;
for (int i = 0; i < parameter_count; ++i) {
output_[0]->SetFrameSlot(output_offset, 0);
output_offset -= kPointerSize;
}

// Translate the incoming parameters. This may overwrite some of the
// incoming argument slots we've just cleared.
int input_offset = input_frame_size - kPointerSize;
bool ok = true;
int limit = input_offset - (parameter_count * kPointerSize);
while (ok && input_offset > limit) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}

// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
for (int i = 0; ok && i < 4; i++) {
intptr_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
PrintF(" [esp + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d] (fixed part)\n",
output_offset,
input_value,
input_offset);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
output_offset -= kPointerSize;
}

// Translate the rest of the frame.
while (ok && input_offset >= 0) {
ok = DoOsrTranslateCommand(&iterator, &input_offset);
}

// If translation of any command failed, continue using the input frame.
if (!ok) {
delete output_[0];
output_[0] = input_;
output_[0]->SetPc(reinterpret_cast<intptr_t>(from_));
} else {
// Setup the frame pointer and the context pointer.
output_[0]->SetRegister(rbp.code(), input_->GetRegister(rbp.code()));
output_[0]->SetRegister(rsi.code(), input_->GetRegister(rsi.code()));

unsigned pc_offset = data->OsrPcOffset()->value();
intptr_t pc = reinterpret_cast<intptr_t>(
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));

if (FLAG_trace_osr) {
PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
ok ? "finished" : "aborted",
reinterpret_cast<intptr_t>(function));
function->PrintName();
PrintF(" => pc=0x%0" V8PRIxPTR "]\n", output_[0]->GetPc());
}
}

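The control flow at the end of DoComputeOsrOutputFrame reduces to this sketch (stand-in types; the real code works on FrameDescription objects):

struct FrameState {
  long pc;
  bool translated;  // did every DoOsrTranslateCommand succeed?
};

FrameState FinishOsr(bool ok, long unoptimized_pc, long osr_entry_pc) {
  FrameState out;
  out.translated = ok;
  // On failure keep running unoptimized code where we left off;
  // on success enter the optimized code at the recorded OSR pc offset.
  out.pc = ok ? osr_entry_pc : unoptimized_pc;
  return out;
}
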
deps/v8/src/x64/disasm-x64.cc (vendored)
@@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1040,14 +1040,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer(", %s", NameOfXMMRegister(regop));
} else {
const char* mnemonic = "?";
if (opcode == 0x57) {
if (opcode == 0x50) {
mnemonic = "movmskpd";
} else if (opcode == 0x54) {
mnemonic = "andpd";
} else if (opcode == 0x56) {
mnemonic = "orpd";
} else if (opcode == 0x57) {
mnemonic = "xorpd";
} else if (opcode == 0x2E) {
mnemonic = "ucomisd";
} else if (opcode == 0x2F) {
mnemonic = "comisd";
} else if (opcode == 0x50) {
mnemonic = "movmskpd";
} else {
UnimplementedInstruction();
}
deps/v8/src/x64/frames-x64.cc (vendored)
@@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:

deps/v8/src/x64/frames-x64.h (vendored)
@@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
deps/v8/src/x64/full-codegen-x64.cc (vendored)
@@ -207,24 +207,24 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
Move(dot_arguments_slot, rcx, rbx, rdx);
}

if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}

// Visit the declarations and body unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ Declarations");
scope()->VisitIllegalRedeclaration(this);
} else {
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
EmitDeclaration(scope()->function(), Variable::CONST, NULL);
}
// Visit all the explicit declarations unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
scope()->VisitIllegalRedeclaration(this);
} else {
VisitDeclarations(scope()->declarations());
}
}

if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}

{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailout(info->function(), NO_REGISTERS);
@@ -241,9 +241,11 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
VisitStatements(function()->body());
ASSERT(loop_depth() == 0);
}
}

// Always emit a 'return undefined' in case control fell off the end of
// the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the body.
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
EmitReturnSequence();
}
@@ -267,6 +269,13 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt) {
// the deoptimization input data found in the optimized code.
RecordStackCheck(stmt->OsrEntryId());

// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
// in a test instruction after the call so we can extract it from the OSR
// builtin.
ASSERT(loop_depth() > 0);
__ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));

__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
// Record a mapping of the OSR id to this PC. This is used if the OSR
@@ -318,13 +327,6 @@ void FullCodeGenerator::EmitReturnSequence() {
}


FullCodeGenerator::ConstantOperand FullCodeGenerator::GetConstantOperand(
Token::Value op, Expression* left, Expression* right) {
ASSERT(ShouldInlineSmiCase(op));
return kNoConstants;
}


void FullCodeGenerator::EffectContext::Plug(Slot* slot) const {
}

@@ -543,7 +545,7 @@ void FullCodeGenerator::DoTest(Label* if_true,
__ j(equal, if_true);
__ CompareRoot(result_register(), Heap::kFalseValueRootIndex);
__ j(equal, if_false);
ASSERT_EQ(0, kSmiTag);
STATIC_ASSERT(kSmiTag == 0);
__ SmiCompare(result_register(), Smi::FromInt(0));
__ j(equal, if_false);
Condition is_smi = masm_->CheckSmi(result_register());
@@ -733,7 +735,9 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
prop->key()->AsLiteral()->handle()->IsSmi());
__ Move(rcx, prop->key()->AsLiteral()->handle());

Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(is_strict()
? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
}
}
@@ -750,7 +754,8 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
__ push(rsi);  // The context is the first argument.
__ Push(pairs);
__ Push(Smi::FromInt(is_eval() ? 1 : 0));
__ CallRuntime(Runtime::kDeclareGlobals, 3);
__ Push(Smi::FromInt(strict_mode_flag()));
__ CallRuntime(Runtime::kDeclareGlobals, 4);
// Return value is ignored.
}

@@ -851,7 +856,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
VisitForAccumulatorValue(stmt->enumerable());
__ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
__ j(equal, &exit);
__ CompareRoot(rax, Heap::kNullValueRootIndex);
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmpq(rax, null_value);
__ j(equal, &exit);

// Convert the object to a JS object.
@@ -865,12 +872,61 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(&done_convert);
__ push(rax);

// BUG(867): Check cache validity in generated code. This is a fast
// case for the JSObject::IsSimpleEnum cache validity checks. If we
// cannot guarantee cache validity, call the runtime system to check
// cache validity or get the property names in a fixed array.
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
// guarantee cache validity, call the runtime system to check cache
// validity or get the property names in a fixed array.
Label next, call_runtime;
Register empty_fixed_array_value = r8;
__ LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
Register empty_descriptor_array_value = r9;
__ LoadRoot(empty_descriptor_array_value,
Heap::kEmptyDescriptorArrayRootIndex);
__ movq(rcx, rax);
__ bind(&next);

// Check that there are no elements. Register rcx contains the
// current JS object we've reached through the prototype chain.
__ cmpq(empty_fixed_array_value,
FieldOperand(rcx, JSObject::kElementsOffset));
__ j(not_equal, &call_runtime);

// Check that instance descriptors are not empty so that we can
// check for an enum cache. Leave the map in rbx for the subsequent
// prototype load.
__ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset));
__ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset));
__ cmpq(rdx, empty_descriptor_array_value);
__ j(equal, &call_runtime);

// Check that there is an enum cache in the non-empty instance
// descriptors (rdx). This is the case if the next enumeration
// index field does not contain a smi.
__ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset));
__ JumpIfSmi(rdx, &call_runtime);

// For all objects but the receiver, check that the cache is empty.
NearLabel check_prototype;
__ cmpq(rcx, rax);
__ j(equal, &check_prototype);
__ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset));
__ cmpq(rdx, empty_fixed_array_value);
__ j(not_equal, &call_runtime);

// Load the prototype from the map and loop if non-null.
__ bind(&check_prototype);
__ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
__ cmpq(rcx, null_value);
__ j(not_equal, &next);

// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
NearLabel use_cache;
__ movq(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache);

// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ push(rax);  // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);

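The generated fast path above is a prototype-chain walk; modelled in C++ with illustrative fields (not V8's object layout):

struct Obj {
  bool elements_empty;    // elements == empty_fixed_array
  bool has_enum_cache;    // enumeration index field is not a smi
  bool enum_cache_empty;  // bridge cache == empty_fixed_array
  Obj* prototype;         // null terminates the chain
};

bool CanUseEnumCache(Obj* receiver) {
  for (Obj* o = receiver; o != nullptr; o = o->prototype) {
    if (!o->elements_empty || !o->has_enum_cache) return false;
    // Every object but the receiver must have an empty enum cache.
    if (o != receiver && !o->enum_cache_empty) return false;
  }
  return true;  // iterate straight out of the receiver map's cache
}
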
@@ -883,6 +939,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ j(not_equal, &fixed_array);

// We got a map in register rax. Get the enumeration cache from it.
__ bind(&use_cache);
__ movq(rcx, FieldOperand(rax, Map::kInstanceDescriptorsOffset));
__ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset));
__ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset));
@@ -971,8 +1028,14 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
bool pretenure) {
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
if (scope()->is_function_scope() &&
// space for nested functions that don't need literals cloning. If
// we're running with the --always-opt or the --prepare-always-opt
// flag, we need to use the runtime function so that the new function
// we are creating here gets a chance to have its code optimized and
// doesn't just get a copy of the existing unoptimized code.
if (!FLAG_always_opt &&
!FLAG_prepare_always_opt &&
scope()->is_function_scope() &&
info->num_literals() == 0 &&
!pretenure) {
FastNewClosureStub stub;
@@ -1082,8 +1145,11 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
// Check that last extension is NULL.
__ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
__ j(not_equal, slow);
__ movq(temp, ContextOperand(context, Context::FCONTEXT_INDEX));
return ContextOperand(temp, slot->index());

// This function is used only for loads, not stores, so it's safe to
// return an rsi-based operand (the write barrier cannot be allowed to
// destroy the rsi register).
return ContextOperand(context, slot->index());
}


@@ -1333,7 +1399,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
__ CallRuntime(Runtime::kSetProperty, 3);
__ Push(Smi::FromInt(NONE));  // PropertyAttributes
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
}
@@ -1509,14 +1576,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}

Token::Value op = expr->binary_op();
ConstantOperand constant = ShouldInlineSmiCase(op)
? GetConstantOperand(op, expr->target(), expr->value())
: kNoConstants;
ASSERT(constant == kRightConstant || constant == kNoConstants);
if (constant == kNoConstants) {
__ push(rax);  // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
}

OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
? OVERWRITE_RIGHT
@@ -1528,8 +1589,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
op,
mode,
expr->target(),
expr->value(),
constant);
expr->value());
} else {
EmitBinaryOp(op, mode);
}
@@ -1580,10 +1640,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(Expression* expr,
Token::Value op,
OverwriteMode mode,
Expression* left,
Expression* right,
ConstantOperand constant) {
ASSERT(constant == kNoConstants);  // Only handled case.

Expression* right) {
// Do combined smi check of the operands. Left operand is on the
// stack (popped into rdx). Right operand is in rax but moved into
// rcx to make the shifts easier.
@@ -1680,7 +1737,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ movq(rdx, rax);
__ pop(rax);  // Restore value.
__ Move(rcx, prop->key()->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -1701,7 +1760,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr, int bailout_ast_id) {
__ pop(rdx);
}
__ pop(rax);  // Restore value.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
break;
}
@@ -1730,57 +1791,76 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);

} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
// of const variables. Const assignments are simply skipped.
Label done;
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
// scope. However, unlike var initializers, const initializers are able
// to drill a hole to that function context, even from inside a 'with'
// context. We thus bypass the normal static scope lookup.
Slot* slot = var->AsSlot();
Label skip;
switch (slot->type()) {
case Slot::PARAMETER:
// No const parameters.
UNREACHABLE();
break;
case Slot::LOCAL:
__ movq(rdx, Operand(rbp, SlotOffset(slot)));
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &skip);
__ movq(Operand(rbp, SlotOffset(slot)), rax);
break;
case Slot::CONTEXT: {
__ movq(rcx, ContextOperand(rsi, Context::FCONTEXT_INDEX));
__ movq(rdx, ContextOperand(rcx, slot->index()));
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &skip);
__ movq(ContextOperand(rcx, slot->index()), rax);
int offset = Context::SlotOffset(slot->index());
__ movq(rdx, rax);  // Preserve the stored value in eax.
__ RecordWrite(rcx, offset, rdx, rbx);
break;
}
case Slot::LOOKUP:
__ push(rax);
__ push(rsi);
__ Push(var->name());
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
break;
}
__ bind(&skip);

} else if (var->mode() != Variable::CONST) {
// Perform the assignment for non-const variables. Const assignments
// are simply skipped.
Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
if (op == Token::INIT_CONST) {
// Detect const reinitialization by checking for the hole value.
__ movq(rdx, Operand(rbp, SlotOffset(slot)));
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done);
}
// Perform the assignment.
__ movq(Operand(rbp, SlotOffset(slot)), rax);
break;

case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, rcx);
if (op == Token::INIT_CONST) {
// Detect const reinitialization by checking for the hole value.
__ movq(rdx, target);
__ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done);
}
// Perform the assignment and issue the write barrier.
__ movq(target, rax);
// The value of the assignment is in rax. RecordWrite clobbers its
// register arguments.
__ movq(rdx, rax);
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
int offset = Context::SlotOffset(slot->index());
__ RecordWrite(rcx, offset, rdx, rbx);
break;
}

case Slot::LOOKUP:
// Call the runtime for the assignment. The runtime will ignore
// const reinitialization.
// Call the runtime for the assignment.
__ push(rax);  // Value.
__ push(rsi);  // Context.
__ Push(var->name());
if (op == Token::INIT_CONST) {
// The runtime will ignore const redeclaration.
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
__ CallRuntime(Runtime::kStoreContextSlot, 3);
}
__ Push(Smi::FromInt(strict_mode_flag()));
__ CallRuntime(Runtime::kStoreContextSlot, 4);
break;
}
__ bind(&done);
}
}

@@ -1809,7 +1889,9 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
} else {
__ pop(rdx);
}
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);

// If the assignment ends an initialization block, revert to fast case.
@@ -1847,7 +1929,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
// Record source code position before IC call.
SetSourcePosition(expr->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);

// If the assignment ends an initialization block, revert to fast case.
@@ -1963,6 +2047,27 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) {
}


void FullCodeGenerator::EmitResolvePossiblyDirectEval(ResolveEvalFlag flag,
int arg_count) {
// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(rsp, arg_count * kPointerSize));
} else {
__ PushRoot(Heap::kUndefinedValueRootIndex);
}

// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));

// Push the strict mode flag.
__ Push(Smi::FromInt(strict_mode_flag()));

__ CallRuntime(flag == SKIP_CONTEXT_LOOKUP
? Runtime::kResolvePossiblyDirectEvalNoLookup
: Runtime::kResolvePossiblyDirectEval, 4);
}

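After this refactoring both call sites push the same four runtime arguments; their layout, sketched with stand-in types:

// Arguments to the eval resolver, in push order (void* stands in for
// V8's tagged values).
struct ResolveEvalArgs {
  void* function;        // the function being called (possibly eval)
  void* first_argument;  // args[0], or undefined when arg_count == 0
  void* receiver;        // receiver of the enclosing function
  int strict_mode_smi;   // Smi::FromInt(strict_mode_flag()), the new 4th arg
};
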
void FullCodeGenerator::VisitCall(Call* expr) {
#ifdef DEBUG
// We want to verify that RecordJSReturnSite gets called on all paths
@@ -1990,21 +2095,30 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VisitForStackValue(args->at(i));
}

// Push copy of the function - found below the arguments.
__ push(Operand(rsp, (arg_count + 1) * kPointerSize));

// Push copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
__ push(Operand(rsp, arg_count * kPointerSize));
} else {
__ PushRoot(Heap::kUndefinedValueRootIndex);
// If we know that eval can only be shadowed by eval-introduced
// variables we attempt to load the global eval function directly
// in generated code. If we succeed, there is no need to perform a
// context lookup in the runtime system.
Label done;
if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) {
Label slow;
EmitLoadGlobalSlotCheckExtensions(var->AsSlot(),
NOT_INSIDE_TYPEOF,
&slow);
// Push the function and resolve eval.
__ push(rax);
EmitResolvePossiblyDirectEval(SKIP_CONTEXT_LOOKUP, arg_count);
__ jmp(&done);
__ bind(&slow);
}

// Push the receiver of the enclosing function and do runtime call.
__ push(Operand(rbp, (2 + scope()->num_parameters()) * kPointerSize));
// Push the strict mode flag.
__ Push(Smi::FromInt(strict_mode_flag()));
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 4);
// Push copy of the function (found below the arguments) and
// resolve eval.
__ push(Operand(rsp, (arg_count + 1) * kPointerSize));
EmitResolvePossiblyDirectEval(PERFORM_CONTEXT_LOOKUP, arg_count);
if (done.is_linked()) {
__ bind(&done);
}

// The runtime call returns a pair of values in rax (function) and
// rdx (receiver). Touch up the stack with the right values.
@@ -2621,7 +2735,8 @@ void FullCodeGenerator::EmitMathPow(ZoneList<Expression*>* args) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
__ CallRuntime(Runtime::kMath_pow, 2);
MathPowStub stub;
__ CallStub(&stub);
context()->Plug(rax);
}

@@ -2805,7 +2920,8 @@ void FullCodeGenerator::EmitStringCompare(ZoneList<Expression*>* args) {

void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::SIN);
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2815,7 +2931,8 @@ void FullCodeGenerator::EmitMathSin(ZoneList<Expression*>* args) {

void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::COS);
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2825,7 +2942,8 @@ void FullCodeGenerator::EmitMathCos(ZoneList<Expression*>* args) {

void FullCodeGenerator::EmitMathLog(ZoneList<Expression*>* args) {
// Load the argument on the stack and call the stub.
TranscendentalCacheStub stub(TranscendentalCache::LOG);
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::TAGGED);
ASSERT(args->length() == 1);
VisitForStackValue(args->at(0));
__ CallStub(&stub);
@@ -2877,7 +2995,73 @@ void FullCodeGenerator::EmitSwapElements(ZoneList<Expression*>* args) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
Label done;
Label slow_case;
Register object = rax;
Register index_1 = rbx;
Register index_2 = rcx;
Register elements = rdi;
Register temp = rdx;
__ movq(object, Operand(rsp, 2 * kPointerSize));
// Fetch the map and check if array is in fast case.
// Check that object doesn't require security checks and
// has no indexed interceptor.
__ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, temp);
__ j(below, &slow_case);
__ testb(FieldOperand(temp, Map::kBitFieldOffset),
Immediate(KeyedLoadIC::kSlowCaseBitFieldMask));
__ j(not_zero, &slow_case);

// Check the object's elements are in fast case and writable.
__ movq(elements, FieldOperand(object, JSObject::kElementsOffset));
__ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &slow_case);

// Check that both indices are smis.
__ movq(index_1, Operand(rsp, 1 * kPointerSize));
__ movq(index_2, Operand(rsp, 0 * kPointerSize));
__ JumpIfNotBothSmi(index_1, index_2, &slow_case);

// Check that both indices are valid.
// The JSArray length field is a smi since the array is in fast case mode.
__ movq(temp, FieldOperand(object, JSArray::kLengthOffset));
__ SmiCompare(temp, index_1);
__ j(below_equal, &slow_case);
__ SmiCompare(temp, index_2);
__ j(below_equal, &slow_case);

__ SmiToInteger32(index_1, index_1);
__ SmiToInteger32(index_2, index_2);
// Bring addresses into index1 and index2.
__ lea(index_1, FieldOperand(elements, index_1, times_pointer_size,
FixedArray::kHeaderSize));
__ lea(index_2, FieldOperand(elements, index_2, times_pointer_size,
FixedArray::kHeaderSize));

// Swap elements. Use object and temp as scratch registers.
__ movq(object, Operand(index_1, 0));
__ movq(temp, Operand(index_2, 0));
__ movq(Operand(index_2, 0), object);
__ movq(Operand(index_1, 0), temp);

Label new_space;
__ InNewSpace(elements, temp, equal, &new_space);

__ movq(object, elements);
__ RecordWriteHelper(object, index_1, temp);
__ RecordWriteHelper(elements, index_2, temp);

__ bind(&new_space);
// We are done. Drop elements from the stack, and return undefined.
__ addq(rsp, Immediate(3 * kPointerSize));
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
__ jmp(&done);

__ bind(&slow_case);
__ CallRuntime(Runtime::kSwapElements, 3);

__ bind(&done);
context()->Plug(rax);
}

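The fast path above, minus the write barrier, is an ordinary guarded swap; a C++ sketch of the checks it performs:

#include <cstddef>
#include <utility>
#include <vector>

// Swap two elements if both indices are valid for a fast-mode backing
// store; otherwise report failure so the caller can use the runtime.
bool TrySwapElements(std::vector<void*>* elements,
                     long index_1, long index_2) {
  if (index_1 < 0 || index_2 < 0) return false;
  size_t size = elements->size();
  if (static_cast<size_t>(index_1) >= size ||
      static_cast<size_t>(index_2) >= size) return false;
  std::swap((*elements)[index_1], (*elements)[index_2]);
  // The generated code additionally records a write barrier unless the
  // backing store is in new space.
  return true;
}
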
@@ -3000,9 +3184,12 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(ZoneList<Expression*>* args) {

void FullCodeGenerator::EmitGetCachedArrayIndex(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);

VisitForAccumulatorValue(args->at(0));

if (FLAG_debug_code) {
__ AbortIfNotString(rax);
}

__ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
ASSERT(String::kHashShift >= kSmiTagSize);
__ IndexFromHash(rax, rax);
@@ -3355,7 +3542,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case NAMED_PROPERTY: {
__ Move(rcx, prop->key()->AsLiteral()->handle());
__ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3370,7 +3559,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
__ pop(rcx);
__ pop(rdx);
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
Handle<Code> ic(Builtins::builtin(
is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
if (expr->is_postfix()) {
@@ -3715,6 +3906,22 @@ void FullCodeGenerator::EmitCallIC(Handle<Code> ic, RelocInfo::Mode mode) {


void FullCodeGenerator::EmitCallIC(Handle<Code> ic, JumpPatchSite* patch_site) {
switch (ic->kind()) {
case Code::LOAD_IC:
__ IncrementCounter(&Counters::named_load_full, 1);
break;
case Code::KEYED_LOAD_IC:
__ IncrementCounter(&Counters::keyed_load_full, 1);
break;
case Code::STORE_IC:
__ IncrementCounter(&Counters::named_store_full, 1);
break;
case Code::KEYED_STORE_IC:
__ IncrementCounter(&Counters::keyed_store_full, 1);
default:
break;
}

__ call(ic, RelocInfo::CODE_TARGET);
if (patch_site != NULL && patch_site->is_bound()) {
patch_site->EmitPatchInfo();
27
deps/v8/src/x64/ic-x64.cc
vendored
27
deps/v8/src/x64/ic-x64.cc
vendored
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -766,7 +766,8 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
 }
 
 
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -813,7 +814,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
   __ bind(&slow);
   __ Integer32ToSmi(rcx, rcx);
   __ bind(&slow_with_tagged_index);
-  GenerateRuntimeSetProperty(masm);
+  GenerateRuntimeSetProperty(masm, strict_mode);
   // Never returns to here.
 
   // Check whether the elements is a pixel array.
@@ -1474,7 +1475,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
 
 
 void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                  Code::ExtraICState extra_ic_state) {
+                                  StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -1486,7 +1487,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
   Code::Flags flags = Code::ComputeFlags(Code::STORE_IC,
                                          NOT_IN_LOOP,
                                          MONOMORPHIC,
-                                         extra_ic_state);
+                                         strict_mode);
   StubCache::GenerateProbe(masm, flags, rdx, rcx, rbx, no_reg);
 
   // Cache miss: Jump to runtime.
@@ -1593,7 +1594,8 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
 }
 
 
-void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
+void StoreIC::GenerateGlobalProxy(MacroAssembler* masm,
+                                  StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : name
@@ -1604,14 +1606,17 @@ void StoreIC::GenerateGlobalProxy(MacroAssembler* masm) {
   __ push(rdx);
   __ push(rcx);
   __ push(rax);
-  __ push(rbx);
+  __ Push(Smi::FromInt(NONE));  // PropertyAttributes
+  __ Push(Smi::FromInt(strict_mode));
+  __ push(rbx);  // return address
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
 
 
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
+void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                              StrictModeFlag strict_mode) {
   // ----------- S t a t e -------------
   //  -- rax    : value
   //  -- rcx    : key
@@ -1623,10 +1628,12 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
   __ push(rdx);  // receiver
   __ push(rcx);  // key
   __ push(rax);  // value
+  __ Push(Smi::FromInt(NONE));  // PropertyAttributes
+  __ Push(Smi::FromInt(strict_mode));  // Strict mode.
   __ push(rbx);  // return address
 
   // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
+  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
 }
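The two tail-calls above grow from 3 to 5 runtime arguments because the property attributes and the strict-mode flag now travel with the receiver/key/value triple. A sketch of why the callee needs that flag, with invented types (only the sloppy-vs-strict assignment semantics follow ECMAScript 5):

    // Analogous to Runtime::kSetProperty once it receives strict_mode: the
    // same failed write is silently ignored in sloppy mode but throws in
    // strict mode. Types and names here are invented for illustration.
    #include <map>
    #include <stdexcept>
    #include <string>

    enum StrictModeFlag { kNonStrictMode, kStrictMode };

    struct Object {
      std::map<std::string, double> props;
      bool frozen = false;  // stand-in for a non-writable property
    };

    void SetProperty(Object& obj, const std::string& key, double value,
                     StrictModeFlag strict_mode) {
      if (obj.frozen) {
        if (strict_mode == kStrictMode)
          throw std::runtime_error("TypeError: read-only property");
        return;  // sloppy mode: the write is a no-op
      }
      obj.props[key] = value;
    }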
2  deps/v8/src/x64/jump-target-x64.cc  vendored
@@ -1,4 +1,4 @@
-// Copyright 2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
241  deps/v8/src/x64/lithium-codegen-x64.cc  vendored
@@ -77,6 +77,7 @@ bool LCodeGen::GenerateCode() {
   return GeneratePrologue() &&
          GenerateBody() &&
          GenerateDeferredCode() &&
+         GenerateJumpTable() &&
          GenerateSafepointTable();
 }
 
@@ -240,6 +241,16 @@ LInstruction* LCodeGen::GetNextInstruction() {
 }
 
 
+bool LCodeGen::GenerateJumpTable() {
+  for (int i = 0; i < jump_table_.length(); i++) {
+    JumpTableEntry* info = jump_table_[i];
+    __ bind(&(info->label_));
+    __ Jump(info->address_, RelocInfo::RUNTIME_ENTRY);
+  }
+  return !is_aborted();
+}
+
+
 bool LCodeGen::GenerateDeferredCode() {
   ASSERT(is_generating());
   for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
@@ -512,10 +523,17 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
   if (cc == no_condition) {
     __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
-    NearLabel done;
-    __ j(NegateCondition(cc), &done);
-    __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
-    __ bind(&done);
+    JumpTableEntry* jump_info = NULL;
+    // We often have several deopts to the same entry, reuse the last
+    // jump entry if this is the case.
+    if (jump_table_.length() > 0 &&
+        jump_table_[jump_table_.length() - 1]->address_ == entry) {
+      jump_info = jump_table_[jump_table_.length() - 1];
+    } else {
+      jump_info = new JumpTableEntry(entry);
+      jump_table_.Add(jump_info);
+    }
+    __ j(cc, &jump_info->label_);
   }
 }
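The DeoptimizeIf change replaces an inline jump-around (four instructions per deopt check) with one conditional jump into a shared, lazily built jump table, and consecutive checks that target the same deopt entry reuse the previous slot. A minimal sketch of that reuse policy, using a plain std::vector instead of V8's ZoneList:

    // Only the "reuse the last entry if it targets the same address" rule is
    // taken from the diff; everything else is simplified for illustration.
    #include <cstdint>
    #include <vector>

    struct JumpTableEntry {
      explicit JumpTableEntry(uintptr_t address) : address(address) {}
      uintptr_t address;  // deoptimization entry this slot jumps to
    };

    JumpTableEntry* GetJumpInfo(std::vector<JumpTableEntry*>& table,
                                uintptr_t entry) {
      // Consecutive deopt checks frequently share a target, so comparing
      // against the last element alone already removes most duplicates.
      if (!table.empty() && table.back()->address == entry) {
        return table.back();
      }
      table.push_back(new JumpTableEntry(entry));
      return table.back();
    }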
@@ -527,7 +545,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
   Handle<DeoptimizationInputData> data =
       Factory::NewDeoptimizationInputData(length, TENURED);
 
-  data->SetTranslationByteArray(*translations_.CreateByteArray());
+  Handle<ByteArray> translations = translations_.CreateByteArray();
+  data->SetTranslationByteArray(*translations);
   data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
 
   Handle<FixedArray> literals =
@@ -686,13 +705,13 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
       break;
     }
     case CodeStub::StringCharAt: {
-      // TODO(1116): Add StringCharAt stub to x64.
-      Abort("Unimplemented: %s", "StringCharAt Stub");
+      StringCharAtStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
     case CodeStub::MathPow: {
-      // TODO(1115): Add MathPow stub to x64.
-      Abort("Unimplemented: %s", "MathPow Stub");
+      MathPowStub stub;
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
    }
     case CodeStub::NumberToString: {
@@ -711,7 +730,8 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
       break;
     }
     case CodeStub::TranscendentalCache: {
-      TranscendentalCacheStub stub(instr->transcendental_type());
+      TranscendentalCacheStub stub(instr->transcendental_type(),
+                                   TranscendentalCacheStub::TAGGED);
       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
       break;
     }
@@ -1579,7 +1599,20 @@ static Condition BranchCondition(HHasInstanceType* instr) {
 
 
 void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
-  Abort("Unimplemented: %s", "DoHasInstanceType");
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ testl(input, Immediate(kSmiTagMask));
+  NearLabel done, is_false;
+  __ j(zero, &is_false);
+  __ CmpObjectType(input, TestType(instr->hydrogen()), result);
+  __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  __ jmp(&done);
+  __ bind(&is_false);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ bind(&done);
 }
 
 
@@ -1599,7 +1632,17 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
 
 
 void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
-  Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
+  Register input = ToRegister(instr->InputAt(0));
+  Register result = ToRegister(instr->result());
+
+  ASSERT(instr->hydrogen()->value()->representation().IsTagged());
+  __ LoadRoot(result, Heap::kTrueValueRootIndex);
+  __ testl(FieldOperand(input, String::kHashFieldOffset),
+           Immediate(String::kContainsCachedArrayIndexMask));
+  NearLabel done;
+  __ j(not_zero, &done);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+  __ bind(&done);
 }
 
 
@@ -1795,9 +1838,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   __ push(ToRegister(instr->InputAt(0)));
   __ Push(instr->function());
   __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
-  RecordSafepointWithRegisters(
-      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   __ movq(kScratchRegister, rax);
   __ PopSafepointRegisters();
   __ testq(kScratchRegister, kScratchRegister);
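DoHasCachedArrayIndex compiles down to a single bit test: V8 reserves a region of a string's hash field to record whether the string's numeric array index has already been computed and cached there, so the check is one AND against that field rather than a reparse of the string. A sketch with an invented field layout (the real mask is String::kContainsCachedArrayIndexMask):

    // The bit assignment below is invented for illustration; V8 packs its
    // real hash field differently. Matching the emitted code above, non-zero
    // masked bits mean the cached index is present.
    #include <cstdint>

    constexpr uint32_t kContainsCachedArrayIndexMask = 0x1;  // assumed layout

    bool HasCachedArrayIndex(uint32_t hash_field) {
      // One AND and one compare -- no string scan on the fast path.
      return (hash_field & kContainsCachedArrayIndexMask) != 0;
    }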
@@ -2271,12 +2312,105 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
 
 
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoDeferredMathAbsTaggedHeapNumber");
+  Register input_reg = ToRegister(instr->InputAt(0));
+  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(not_equal, instr->environment());
+
+  Label done;
+  Register tmp = input_reg.is(rax) ? rcx : rax;
+  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
+
+  // Preserve the value of all registers.
+  __ PushSafepointRegisters();
+
+  Label negative;
+  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it. We do not need to patch the stack since |input| and
+  // |result| are the same register and |input| will be restored
+  // unchanged by popping safepoint registers.
+  __ testl(tmp, Immediate(HeapNumber::kSignMask));
+  __ j(not_zero, &negative);
+  __ jmp(&done);
+
+  __ bind(&negative);
+
+  Label allocated, slow;
+  __ AllocateHeapNumber(tmp, tmp2, &slow);
+  __ jmp(&allocated);
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
+  // Set the pointer to the new heap number in tmp.
+  if (!tmp.is(rax)) {
+    __ movq(tmp, rax);
+  }
+
+  // Restore input_reg after call to runtime.
+  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+  __ bind(&allocated);
+  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ shl(tmp2, Immediate(1));
+  __ shr(tmp2, Immediate(1));
+  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+  __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+  __ bind(&done);
+  __ PopSafepointRegisters();
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+  Register input_reg = ToRegister(instr->InputAt(0));
+  __ testl(input_reg, input_reg);
+  Label is_positive;
+  __ j(not_sign, &is_positive);
+  __ negl(input_reg);  // Sets flags.
+  DeoptimizeIf(negative, instr->environment());
+  __ bind(&is_positive);
+}
+
+
 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoMathAbs");
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LUnaryMathOperation* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+   private:
+    LUnaryMathOperation* instr_;
+  };
+
+  ASSERT(instr->InputAt(0)->Equals(instr->result()));
+  Representation r = instr->hydrogen()->value()->representation();
+
+  if (r.IsDouble()) {
+    XMMRegister scratch = xmm0;
+    XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
+    __ xorpd(scratch, scratch);
+    __ subsd(scratch, input_reg);
+    __ andpd(input_reg, scratch);
+  } else if (r.IsInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {  // Tagged case.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input_reg = ToRegister(instr->InputAt(0));
+    // Smi check.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
 }
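The shl/shr pair in the allocated path and the xorpd/subsd/andpd sequence in DoMathAbs both compute fabs the same way: clear the IEEE-754 sign bit and leave the remaining 63 bits alone. The same trick in portable C++, assuming binary64 doubles (which is what a HeapNumber stores):

    // fabs by clearing the sign bit, as the shl/shr pair above does on the
    // raw HeapNumber payload.
    #include <cstdint>
    #include <cstring>

    double BitwiseAbs(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof bits);   // type-pun safely
      bits &= ~(uint64_t{1} << 63);          // zero the sign bit only
      std::memcpy(&x, &bits, sizeof bits);
      return x;
    }

This maps -0.0 to +0.0 and leaves NaN payloads intact, matching Math.abs.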
@@ -2355,22 +2489,78 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
 
 
 void LCodeGen::DoPower(LPower* instr) {
-  Abort("Unimplemented: %s", "DoPower");
+  LOperand* left = instr->InputAt(0);
+  XMMRegister left_reg = ToDoubleRegister(left);
+  ASSERT(!left_reg.is(xmm1));
+  LOperand* right = instr->InputAt(1);
+  XMMRegister result_reg = ToDoubleRegister(instr->result());
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  if (exponent_type.IsDouble()) {
+    __ PrepareCallCFunction(2);
+    // Move arguments to correct registers
+    __ movsd(xmm0, left_reg);
+    ASSERT(ToDoubleRegister(right).is(xmm1));
+    __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+  } else if (exponent_type.IsInteger32()) {
+    __ PrepareCallCFunction(2);
+    // Move arguments to correct registers: xmm0 and edi (not rdi).
+    // On Windows, the registers are xmm0 and edx.
+    __ movsd(xmm0, left_reg);
+#ifdef _WIN64
+    ASSERT(ToRegister(right).is(rdx));
+#else
+    ASSERT(ToRegister(right).is(rdi));
+#endif
+    __ CallCFunction(ExternalReference::power_double_int_function(), 2);
+  } else {
+    ASSERT(exponent_type.IsTagged());
+    CpuFeatures::Scope scope(SSE2);
+    Register right_reg = ToRegister(right);
+
+    Label non_smi, call;
+    __ JumpIfNotSmi(right_reg, &non_smi);
+    __ SmiToInteger32(right_reg, right_reg);
+    __ cvtlsi2sd(xmm1, right_reg);
+    __ jmp(&call);
+
+    __ bind(&non_smi);
+    __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE, kScratchRegister);
+    DeoptimizeIf(not_equal, instr->environment());
+    __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
+
+    __ bind(&call);
+    __ PrepareCallCFunction(2);
+    // Move arguments to correct registers xmm0 and xmm1.
+    __ movsd(xmm0, left_reg);
+    // Right argument is already in xmm1.
+    __ CallCFunction(ExternalReference::power_double_double_function(), 2);
+  }
+  // Return value is in xmm0.
+  __ movsd(result_reg, xmm0);
 }
 
 
 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoMathLog");
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
 
 
 void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoMathCos");
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
 
 
 void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
-  Abort("Unimplemented: %s", "DoMathSin");
+  ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
+  TranscendentalCacheStub stub(TranscendentalCache::LOG,
+                               TranscendentalCacheStub::UNTAGGED);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 }
 
 
@@ -2414,6 +2604,7 @@ void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   int arity = instr->arity();
   Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
 }
 
 
@@ -2506,7 +2697,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   ASSERT(ToRegister(instr->value()).is(rax));
 
   __ Move(rcx, instr->hydrogen()->name());
-  Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      info_->is_strict() ? Builtins::StoreIC_Initialize_Strict
+                         : Builtins::StoreIC_Initialize));
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2575,7 +2768,9 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   ASSERT(ToRegister(instr->key()).is(rcx));
   ASSERT(ToRegister(instr->value()).is(rax));
 
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  Handle<Code> ic(Builtins::builtin(
+      info_->is_strict() ? Builtins::KeyedStoreIC_Initialize_Strict
+                         : Builtins::KeyedStoreIC_Initialize));
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
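DoPower calls into C, so the exponent must already sit wherever the native ABI puts the second argument: rdx on Win64, rdi on System V x86-64, while doubles travel in xmm0/xmm1 on both. The fallbacks reached through ExternalReference presumably look roughly like the helpers below; the bodies here are invented, only the double/int split mirrors the diff:

    // Sketch of the C fallbacks DoPower targets. Exact V8 signatures are not
    // reproduced here; the compiler places the int argument per-ABI, which is
    // what the #ifdef in the generated-code path has to do by hand.
    #include <cmath>

    double power_double_double(double base, double exponent) {
      return std::pow(base, exponent);  // both args arrive in xmm0/xmm1
    }

    double power_double_int(double base, int exponent) {
      bool negative = exponent < 0;
      unsigned n = negative ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
      double result = 1.0;
      while (n != 0) {           // square-and-multiply
        if (n & 1) result *= base;
        base *= base;
        n >>= 1;
      }
      return negative ? 1.0 / result : result;
    }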
12  deps/v8/src/x64/lithium-codegen-x64.h  vendored
@@ -53,6 +53,7 @@ class LCodeGen BASE_EMBEDDED {
         current_instruction_(-1),
         instructions_(chunk->instructions()),
         deoptimizations_(4),
+        jump_table_(4),
         deoptimization_literals_(8),
         inlined_function_count_(0),
         scope_(chunk->graph()->info()->scope()),
@@ -147,6 +148,7 @@ class LCodeGen BASE_EMBEDDED {
   bool GeneratePrologue();
   bool GenerateBody();
   bool GenerateDeferredCode();
+  bool GenerateJumpTable();
   bool GenerateSafepointTable();
 
   void CallCode(Handle<Code> code,
@@ -186,6 +188,7 @@ class LCodeGen BASE_EMBEDDED {
   XMMRegister ToDoubleRegister(int index) const;
 
   // Specific math operations - used from DoUnaryMathOperation.
+  void EmitIntegerMathAbs(LUnaryMathOperation* instr);
   void DoMathAbs(LUnaryMathOperation* instr);
   void DoMathFloor(LUnaryMathOperation* instr);
   void DoMathRound(LUnaryMathOperation* instr);
@@ -233,6 +236,14 @@ class LCodeGen BASE_EMBEDDED {
   // Emits code for pushing a constant operand.
   void EmitPushConstantOperand(LOperand* operand);
 
+  struct JumpTableEntry {
+    inline JumpTableEntry(Address address)
+        : label_(),
+          address_(address) { }
+    Label label_;
+    Address address_;
+  };
+
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -241,6 +252,7 @@ class LCodeGen BASE_EMBEDDED {
   int current_instruction_;
   const ZoneList<LInstruction*>* instructions_;
   ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<JumpTableEntry*> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
34  deps/v8/src/x64/lithium-x64.cc  vendored
@@ -1158,9 +1158,8 @@ LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
 LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
     HInstanceOfKnownGlobal* instr) {
   LInstanceOfKnownGlobal* result =
-      new LInstanceOfKnownGlobal(UseRegisterAtStart(instr->value()));
-  MarkAsSaveDoubles(result);
-  return AssignEnvironment(AssignPointerMap(DefineFixed(result, rax)));
+      new LInstanceOfKnownGlobal(UseFixed(instr->value(), rax));
+  return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
 
@@ -1436,8 +1435,22 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
 
 
 LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  Abort("Unimplemented: %s", "DoPower");
-  return NULL;
+  ASSERT(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  ASSERT(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), xmm2);
+  LOperand* right = exponent_type.IsDouble() ?
+      UseFixedDouble(instr->right(), xmm1) :
+#ifdef _WIN64
+      UseFixed(instr->right(), rdx);
+#else
+      UseFixed(instr->right(), rdi);
+#endif
+  LPower* result = new LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, xmm1), instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
 }
 
 
@@ -1502,8 +1515,10 @@ LInstruction* LChunkBuilder::DoIsSmi(HIsSmi* instr) {
 
 
 LInstruction* LChunkBuilder::DoHasInstanceType(HHasInstanceType* instr) {
-  Abort("Unimplemented: %s", "DoHasInstanceType");
-  return NULL;
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new LHasInstanceType(value));
 }
 
 
@@ -1516,8 +1531,9 @@ LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
 
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndex(
     HHasCachedArrayIndex* instr) {
-  Abort("Unimplemented: %s", "DoHasCachedArrayIndex");
-  return NULL;
+  ASSERT(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+  return DefineAsRegister(new LHasCachedArrayIndex(value));
 }