Merge branch 'v0.4'

Ryan Dahl 2011-02-25 16:48:48 -08:00
commit 8838e14ac0
155 changed files with 7288 additions and 4021 deletions

deps/libeio/wscript

@ -125,5 +125,7 @@ def build(bld):
libeio.install_path = None
if bld.env["USE_DEBUG"]:
libeio.clone("debug");
if Options.options.product_type != 'program':
libeio.ccflags = "-fPIC"
bld.install_files('${PREFIX}/include/node/', 'eio.h');

deps/v8/ChangeLog

@ -1,3 +1,16 @@
2011-02-24: Version 3.1.6
Fixed a number of crash bugs.
Added support for Cygwin (issue 64).
Improved Crankshaft for x64 and ARM.
Added Crankshaft support for stores to pixel arrays.
Fixed issue in CPU profiler with Crankshaft.
2011-02-16: Version 3.1.5
Change RegExp parsing to disallow /(*)/.

deps/v8/SConstruct

@ -306,6 +306,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
'-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']


@ -154,6 +154,7 @@ SOURCES = {
arm/jump-target-arm.cc
arm/lithium-arm.cc
arm/lithium-codegen-arm.cc
arm/lithium-gap-resolver-arm.cc
arm/macro-assembler-arm.cc
arm/regexp-macro-assembler-arm.cc
arm/register-allocator-arm.cc


@ -446,8 +446,15 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
while (!function->should_have_prototype()) {
found_it = false;
function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),
&found_it);
// There has to be one because we hit the getter.
ASSERT(found_it);
}
if (!function->has_prototype()) {
if (!function->should_have_prototype()) return Heap::undefined_value();
Object* prototype;
{ MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
@ -467,6 +474,13 @@ MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(),
value,
NONE);
}
if (function->has_initial_map()) {
// If the function has allocated the initial map
// replace it with a copy containing the new prototype.


@ -1848,11 +1848,31 @@ void Assembler::vldr(const DwVfpRegister dst,
offset = -offset;
u = 0;
}
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
ASSERT(!base.is(ip));
if (u == 1) {
add(ip, base, Operand(offset));
} else {
sub(ip, base, Operand(offset));
}
emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8);
}
}
void Assembler::vldr(const DwVfpRegister dst,
const MemOperand& operand,
const Condition cond) {
ASSERT(!operand.rm().is_valid());
ASSERT(operand.am_ == Offset);
vldr(dst, operand.rn(), operand.offset(), cond);
}
@ -1870,13 +1890,33 @@ void Assembler::vldr(const SwVfpRegister dst,
offset = -offset;
u = 0;
}
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
int sd, d;
dst.split_code(&sd, &d);
ASSERT(offset >= 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
ASSERT(!base.is(ip));
if (u == 1) {
add(ip, base, Operand(offset));
} else {
sub(ip, base, Operand(offset));
}
emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
}
}
void Assembler::vldr(const SwVfpRegister dst,
const MemOperand& operand,
const Condition cond) {
ASSERT(!operand.rm().is_valid());
ASSERT(operand.am_ == Offset);
vldr(dst, operand.rn(), operand.offset(), cond);
}
@ -1894,11 +1934,30 @@ void Assembler::vstr(const DwVfpRegister src,
offset = -offset;
u = 0;
}
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 |
0xB*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
ASSERT(!base.is(ip));
if (u == 1) {
add(ip, base, Operand(offset));
} else {
sub(ip, base, Operand(offset));
}
emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8);
}
}
void Assembler::vstr(const DwVfpRegister src,
const MemOperand& operand,
const Condition cond) {
ASSERT(!operand.rm().is_valid());
ASSERT(operand.am_ == Offset);
vstr(src, operand.rn(), operand.offset(), cond);
}
@ -1916,13 +1975,32 @@ void Assembler::vstr(const SwVfpRegister src,
offset = -offset;
u = 0;
}
ASSERT(offset % 4 == 0);
ASSERT((offset / 4) < 256);
ASSERT(offset >= 0);
int sd, d;
src.split_code(&sd, &d);
emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
ASSERT(offset >= 0);
if ((offset % 4) == 0 && (offset / 4) < 256) {
emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
0xA*B8 | ((offset / 4) & 255));
} else {
// Larger offsets must be handled by computing the correct address
// in the ip register.
ASSERT(!base.is(ip));
if (u == 1) {
add(ip, base, Operand(offset));
} else {
sub(ip, base, Operand(offset));
}
emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
}
}
void Assembler::vstr(const SwVfpRegister src,
const MemOperand& operand,
const Condition cond) {
ASSERT(!operand.rm().is_valid());
ASSERT(operand.am_ == Offset);
vstr(src, operand.rn(), operand.offset(), cond);
}
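All four vldr/vstr variants above share the same escape hatch, and the reason is the VFP encoding: the immediate offset field is 8 bits wide and counts 4-byte words. A minimal standalone sketch of the predicate the new code tests (plain C++, not V8 source):

    #include <cstdlib>

    // True if offset can be encoded directly in a single vldr/vstr: it
    // must be word-aligned and its magnitude below 256 words (1024
    // bytes). Otherwise the correct address is first formed in the ip
    // scratch register and the instruction uses a zero offset.
    bool FitsInVfpOffsetField(int offset) {
      offset = std::abs(offset);
      return (offset % 4) == 0 && (offset / 4) < 256;
    }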


@ -387,7 +387,7 @@ class Operand BASE_EMBEDDED {
// Return true if this is a register operand.
INLINE(bool is_reg() const);
// Return true of this operand fits in one instruction so that no
// Return true if this operand fits in one instruction so that no
// 2-instruction solution with a load into the ip register is necessary.
bool is_single_instruction() const;
bool must_use_constant_pool() const;
@ -439,7 +439,7 @@ class MemOperand BASE_EMBEDDED {
offset_ = offset;
}
uint32_t offset() {
uint32_t offset() const {
ASSERT(rm_.is(no_reg));
return offset_;
}
@ -447,6 +447,10 @@ class MemOperand BASE_EMBEDDED {
Register rn() const { return rn_; }
Register rm() const { return rm_; }
bool OffsetIsUint12Encodable() const {
return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
}
private:
Register rn_; // base
Register rm_; // register offset
@ -902,22 +906,34 @@ class Assembler : public Malloced {
void vldr(const DwVfpRegister dst,
const Register base,
int offset, // Offset must be a multiple of 4.
int offset,
const Condition cond = al);
void vldr(const DwVfpRegister dst,
const MemOperand& src,
const Condition cond = al);
void vldr(const SwVfpRegister dst,
const Register base,
int offset, // Offset must be a multiple of 4.
int offset,
const Condition cond = al);
void vldr(const SwVfpRegister dst,
const MemOperand& src,
const Condition cond = al);
void vstr(const DwVfpRegister src,
const Register base,
int offset, // Offset must be a multiple of 4.
int offset,
const Condition cond = al);
void vstr(const DwVfpRegister src,
const MemOperand& dst,
const Condition cond = al);
void vstr(const SwVfpRegister src,
const Register base,
int offset, // Offset must be a multiple of 4.
int offset,
const Condition cond = al);
void vstr(const SwVfpRegister src,
const MemOperand& dst,
const Condition cond = al);
void vmov(const DwVfpRegister dst,


@ -2661,8 +2661,8 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Allocate new heap number for result.
Register result = r5;
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
GenerateHeapResultAllocation(
masm, result, heap_number_map, scratch1, scratch2, gc_required);
// Load the operands.
if (smi_operands) {
@ -2811,8 +2811,14 @@ void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
// Allocate new heap number for result.
__ bind(&result_not_a_smi);
__ AllocateHeapNumber(
r5, scratch1, scratch2, heap_number_map, gc_required);
Register result = r5;
if (smi_operands) {
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
} else {
GenerateHeapResultAllocation(
masm, result, heap_number_map, scratch1, scratch2, gc_required);
}
// r2: Answer as signed int32.
// r5: Heap number to write answer into.
@ -2934,45 +2940,47 @@ void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime;
Label call_runtime, call_string_add_or_runtime;
GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
// If all else fails, use the runtime system to get the correct
// result.
__ bind(&call_runtime);
GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
// Try to add strings before calling runtime.
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
GenerateAddStrings(masm);
}
GenericBinaryOpStub stub(op_, mode_, r1, r0);
__ TailCallStub(&stub);
__ bind(&call_runtime);
GenerateCallRuntime(masm);
}
void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
Label left_not_string, call_runtime;
Register left = r1;
Register right = r0;
Label call_runtime;
// Check if first argument is a string.
__ JumpIfSmi(left, &call_runtime);
// Check if left argument is a string.
__ JumpIfSmi(left, &left_not_string);
__ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
__ b(ge, &left_not_string);
// First argument is a string, test second.
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
// Left operand is not a string, test right.
__ bind(&left_not_string);
__ JumpIfSmi(right, &call_runtime);
__ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
__ b(ge, &call_runtime);
// First and second argument are strings.
StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_stub);
__ TailCallStub(&string_add_right_stub);
// At least one argument is not a string.
__ bind(&call_runtime);
@ -3706,7 +3714,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// The offset was stored in r4 safepoint slot.
// (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
__ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4));
__ LoadFromSafepointRegisterSlot(scratch, r4);
__ sub(inline_site, lr, scratch);
// Get the map location in scratch and patch it.
__ GetRelocatedValueLocation(inline_site, scratch);
@ -5438,18 +5446,19 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void StringAddStub::Generate(MacroAssembler* masm) {
Label string_add_runtime;
Label string_add_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
// Stack on entry:
// sp[0]: second argument.
// sp[4]: first argument.
// sp[0]: second argument (right).
// sp[4]: first argument (left).
// Load the two arguments.
__ ldr(r0, MemOperand(sp, 1 * kPointerSize)); // First argument.
__ ldr(r1, MemOperand(sp, 0 * kPointerSize)); // Second argument.
// Make sure that both arguments are strings if not known in advance.
if (string_check_) {
STATIC_ASSERT(kSmiTag == 0);
if (flags_ == NO_STRING_ADD_FLAGS) {
__ JumpIfEitherSmi(r0, r1, &string_add_runtime);
// Load instance types.
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
@ -5461,13 +5470,27 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r4, Operand(kIsNotStringMask));
__ tst(r5, Operand(kIsNotStringMask), eq);
__ b(ne, &string_add_runtime);
} else {
// Here at least one of the arguments is definitely a string.
// We convert the one that is not known to be a string.
if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
GenerateConvertArgument(
masm, 1 * kPointerSize, r0, r2, r3, r4, r5, &call_builtin);
builtin_id = Builtins::STRING_ADD_RIGHT;
} else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
GenerateConvertArgument(
masm, 0 * kPointerSize, r1, r2, r3, r4, r5, &call_builtin);
builtin_id = Builtins::STRING_ADD_LEFT;
}
}
// Both arguments are strings.
// r0: first string
// r1: second string
// r4: first string instance type (if string_check_)
// r5: second string instance type (if string_check_)
// r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
{
Label strings_not_empty;
// Check if either of the strings is empty. In that case return the other.
@ -5495,8 +5518,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r1: second string
// r2: length of first string
// r3: length of second string
// r4: first string instance type (if string_check_)
// r5: second string instance type (if string_check_)
// r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// Look at the length of the result of adding the two strings.
Label string_add_flat_result, longer_than_two;
// Adding two lengths can't overflow.
@ -5508,7 +5531,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(ne, &longer_than_two);
// Check that both strings are non-external ascii strings.
if (!string_check_) {
if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@ -5556,7 +5579,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// If result is not supposed to be flat, allocate a cons string object.
// If both strings are ascii the result is an ascii cons string.
if (!string_check_) {
if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@ -5604,11 +5627,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r1: second string
// r2: length of first string
// r3: length of second string
// r4: first string instance type (if string_check_)
// r5: second string instance type (if string_check_)
// r4: first string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r5: second string instance type (if flags_ == NO_STRING_ADD_FLAGS)
// r6: sum of lengths.
__ bind(&string_add_flat_result);
if (!string_check_) {
if (flags_ != NO_STRING_ADD_FLAGS) {
__ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
@ -5706,6 +5729,60 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
__ InvokeBuiltin(builtin_id, JUMP_JS);
}
}
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* slow) {
// First check if the argument is already a string.
Label not_string, done;
__ JumpIfSmi(arg, &not_string);
__ CompareObjectType(arg, scratch1, scratch1, FIRST_NONSTRING_TYPE);
__ b(lt, &done);
// Check the number to string cache.
Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
arg,
scratch1,
scratch2,
scratch3,
scratch4,
false,
&not_cached);
__ mov(arg, scratch1);
__ str(arg, MemOperand(sp, stack_offset));
__ jmp(&done);
// Check if the argument is a safe string wrapper.
__ bind(&not_cached);
__ JumpIfSmi(arg, slow);
__ CompareObjectType(
arg, scratch1, scratch2, JS_VALUE_TYPE); // map -> scratch1.
__ b(ne, slow);
__ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitField2Offset));
__ and_(scratch2,
scratch2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ cmp(scratch2,
Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ b(ne, slow);
__ ldr(arg, FieldMemOperand(arg, JSValue::kValueOffset));
__ str(arg, MemOperand(sp, stack_offset));
__ bind(&done);
}


@ -335,24 +335,36 @@ class TypeRecordingBinaryOpStub: public CodeStub {
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
// Omit left string check in stub (left is definitely a string).
NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
// Omit right string check in stub (right is definitely a string).
NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
// Omit both string checks in stub.
NO_STRING_CHECK_IN_STUB =
NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
}
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
private:
Major MajorKey() { return StringAdd; }
int MinorKey() { return string_check_ ? 0 : 1; }
int MinorKey() { return flags_; }
void Generate(MacroAssembler* masm);
// Should the stub check whether arguments are strings?
bool string_check_;
void GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
Label* slow);
const StringAddFlags flags_;
};
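Because compiled stubs are cached by a (major, minor) key, folding the whole flag set into MinorKey gives each check combination its own specialized stub. A hedged sketch of the keying (the enum is from the diff; the cache mechanics are assumed):

    enum StringAddFlags {
      NO_STRING_ADD_FLAGS = 0,
      NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
      NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
      NO_STRING_CHECK_IN_STUB =
          NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
    };

    // The old stub could only distinguish string_check_ on/off (minor
    // keys 0 and 1); the raw flags value now yields four distinct keys
    // (0, 1, 2, 3), one per generated variant.
    int MinorKeyFor(StringAddFlags flags) { return static_cast<int>(flags); }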


@ -5850,8 +5850,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
} else if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
// but "delete this" is.
ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();


@ -50,6 +50,11 @@ void CPU::Setup() {
void CPU::FlushICache(void* start, size_t size) {
// Nothing to do when flushing zero instructions.
if (size == 0) {
return;
}
#if defined (USE_SIMULATOR)
// Not generating ARM instructions for C-code. This means that we are
// building an ARM emulator based target. We should notify the simulator


@ -429,14 +429,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
// The context can be gotten from the function so long as we don't
// optimize functions that need local contexts.
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
// so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function->context());
// The context for the bottommost output frame should also agree with the
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = reinterpret_cast<intptr_t>(function->context());
}
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) {
output_frame->SetRegister(cp.code(), value);
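Stripped of the assembly around it, the fix above is a one-line selection; a value-level sketch with plain integers standing in for frame slots (types hypothetical):

    #include <cstdint>

    // The bottommost output frame must reuse the context recorded in
    // the input frame; only non-bottommost (inlined) frames may take
    // the context from the function's closure.
    intptr_t ContextForOutputFrame(bool is_bottommost,
                                   intptr_t input_frame_context,
                                   intptr_t function_context) {
      return is_bottommost ? input_frame_context : function_context;
    }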


@ -219,46 +219,47 @@ void FullCodeGenerator::Generate(CompilationInfo* info) {
Move(dot_arguments_slot, r3, r1, r2);
}
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
EmitDeclaration(scope()->function(), Variable::CONST, NULL);
}
// Visit all the explicit declarations unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
scope()->VisitIllegalRedeclaration(this);
} else {
VisitDeclarations(scope()->declarations());
}
}
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
// Check the stack for overflow or break request.
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailout(info->function(), NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
}
{ Comment cmnt(masm_, "[ Body");
ASSERT(loop_depth() == 0);
VisitStatements(function()->body());
ASSERT(loop_depth() == 0);
// Visit the declarations and body unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
Comment cmnt(masm_, "[ Declarations");
scope()->VisitIllegalRedeclaration(this);
} else {
{ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
EmitDeclaration(scope()->function(), Variable::CONST, NULL);
}
VisitDeclarations(scope()->declarations());
}
{ Comment cmnt(masm_, "[ Stack check");
PrepareForBailout(info->function(), NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
}
{ Comment cmnt(masm_, "[ Body");
ASSERT(loop_depth() == 0);
VisitStatements(function()->body());
ASSERT(loop_depth() == 0);
}
}
// Always emit a 'return undefined' in case control fell off the end of
// the body.
{ Comment cmnt(masm_, "[ return <undefined>;");
// Emit a 'return undefined' in case control fell off the end of the
// body.
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
}
EmitReturnSequence();
@ -694,10 +695,11 @@ void FullCodeGenerator::EmitDeclaration(Variable* variable,
// We bypass the general EmitSlotSearch because we know more about
// this specific context.
// The variable in the decl always resides in the current context.
// The variable in the decl always resides in the current function
// context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check if we have the correct context pointer.
// Check that we're not inside a 'with'.
__ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
__ cmp(r1, cp);
__ Check(eq, "Unexpected declaration in current context.");
@ -1037,7 +1039,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
Slot* slot,
Label* slow) {
ASSERT(slot->type() == Slot::CONTEXT);
Register current = cp;
Register context = cp;
Register next = r3;
Register temp = r4;
@ -1045,22 +1047,25 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(
if (s->num_heap_slots() > 0) {
if (s->calls_eval()) {
// Check that extension is NULL.
__ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
__ b(ne, slow);
}
__ ldr(next, ContextOperand(current, Context::CLOSURE_INDEX));
__ ldr(next, ContextOperand(context, Context::CLOSURE_INDEX));
__ ldr(next, FieldMemOperand(next, JSFunction::kContextOffset));
// Walk the rest of the chain without clobbering cp.
current = next;
context = next;
}
}
// Check that last extension is NULL.
__ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
__ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
__ tst(temp, temp);
__ b(ne, slow);
__ ldr(temp, ContextOperand(current, Context::FCONTEXT_INDEX));
return ContextOperand(temp, slot->index());
// This function is used only for loads, not stores, so it's safe to
// return a cp-based operand (the write barrier cannot be allowed to
// destroy the cp register).
return ContextOperand(context, slot->index());
}
@ -2004,34 +2009,60 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
: Builtins::StoreIC_Initialize));
EmitCallIC(ic, RelocInfo::CODE_TARGET_CONTEXT);
} else if (var->mode() != Variable::CONST || op == Token::INIT_CONST) {
// Perform the assignment for non-const variables and for initialization
// of const variables. Const assignments are simply skipped.
Label done;
} else if (op == Token::INIT_CONST) {
// Like var declarations, const declarations are hoisted to function
// scope. However, unlike var initializers, const initializers are able
// to drill a hole to that function context, even from inside a 'with'
// context. We thus bypass the normal static scope lookup.
Slot* slot = var->AsSlot();
Label skip;
switch (slot->type()) {
case Slot::PARAMETER:
// No const parameters.
UNREACHABLE();
break;
case Slot::LOCAL:
// Detect const reinitialization by checking for the hole value.
__ ldr(r1, MemOperand(fp, SlotOffset(slot)));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r1, ip);
__ b(ne, &skip);
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
case Slot::CONTEXT: {
__ ldr(r1, ContextOperand(cp, Context::FCONTEXT_INDEX));
__ ldr(r2, ContextOperand(r1, slot->index()));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r2, ip);
__ b(ne, &skip);
__ str(r0, ContextOperand(r1, slot->index()));
int offset = Context::SlotOffset(slot->index());
__ mov(r3, r0); // Preserve the stored value in r0.
__ RecordWrite(r1, Operand(offset), r3, r2);
break;
}
case Slot::LOOKUP:
__ push(r0);
__ mov(r0, Operand(slot->var()->name()));
__ Push(cp, r0); // Context and name.
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
break;
}
__ bind(&skip);
} else if (var->mode() != Variable::CONST) {
// Perform the assignment for non-const variables. Const assignments
// are simply skipped.
Slot* slot = var->AsSlot();
switch (slot->type()) {
case Slot::PARAMETER:
case Slot::LOCAL:
if (op == Token::INIT_CONST) {
// Detect const reinitialization by checking for the hole value.
__ ldr(r1, MemOperand(fp, SlotOffset(slot)));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r1, ip);
__ b(ne, &done);
}
// Perform the assignment.
__ str(result_register(), MemOperand(fp, SlotOffset(slot)));
break;
case Slot::CONTEXT: {
MemOperand target = EmitSlotSearch(slot, r1);
if (op == Token::INIT_CONST) {
// Detect const reinitialization by checking for the hole value.
__ ldr(r2, target);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r2, ip);
__ b(ne, &done);
}
// Perform the assignment and issue the write barrier.
__ str(result_register(), target);
// RecordWrite may destroy all its register arguments.
@ -2042,20 +2073,13 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
}
case Slot::LOOKUP:
// Call the runtime for the assignment. The runtime will ignore
// const reinitialization.
// Call the runtime for the assignment.
__ push(r0); // Value.
__ mov(r0, Operand(slot->var()->name()));
__ Push(cp, r0); // Context and name.
if (op == Token::INIT_CONST) {
// The runtime will ignore const redeclaration.
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
} else {
__ CallRuntime(Runtime::kStoreContextSlot, 3);
}
__ CallRuntime(Runtime::kStoreContextSlot, 3);
break;
}
__ bind(&done);
}
}
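The INIT_CONST paths above all implement the same hole-value protocol; a value-level sketch (the Value type here is hypothetical, not V8's):

    // A const slot starts out holding a sentinel "hole". The
    // initializer stores only while the hole is still present, so
    // re-executing the declaration can never overwrite an
    // already-initialized const binding.
    enum class Value { kTheHole, kSomeValue };

    void InitializeConstSlot(Value* slot, Value v) {
      if (*slot != Value::kTheHole) return;  // already initialized: skip
      *slot = v;
    }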
@ -3373,8 +3397,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
// but "delete this" is.
ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->is_global()) {
__ ldr(r2, GlobalObjectOperand());
__ mov(r1, Operand(var->name()));
@ -3414,17 +3438,23 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
if (context()->IsEffect()) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
} else {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
if (context()->IsTest()) ForwardBailoutToChild(expr);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
context()->Plug(if_false, if_true); // Labels swapped.
// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
if (context()->IsTest()) ForwardBailoutToChild(expr);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
context()->Plug(if_false, if_true); // Labels swapped.
}
break;
}


@ -346,7 +346,7 @@ void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
}
void LStoreNamed::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@ -355,7 +355,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) {
}
void LStoreKeyed::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
stream->Add(" <- ");
value()->PrintTo(stream);
}
void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
stream->Add("] <- ");
value()->PrintTo(stream);
}
void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@ -1204,8 +1222,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathRound:
Abort("MathRound LUnaryMathOperation not implemented");
return NULL;
return AssignEnvironment(DefineAsRegister(result));
case kMathPowHalf:
Abort("MathPowHalf LUnaryMathOperation not implemented");
return NULL;
@ -1418,8 +1435,19 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
LInstruction* LChunkBuilder::DoPower(HPower* instr) {
Abort("LPower instruction not implemented on ARM");
return NULL;
ASSERT(instr->representation().IsDouble());
// We call a C function for double power. It can't trigger a GC.
// We need to use a fixed result register for the call.
Representation exponent_type = instr->right()->representation();
ASSERT(instr->left()->representation().IsDouble());
LOperand* left = UseFixedDouble(instr->left(), d1);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), d2) :
UseFixed(instr->right(), r0);
LPower* result = new LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d3),
instr,
CAN_DEOPTIMIZE_EAGERLY);
}
@ -1709,11 +1737,13 @@ LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
LOperand* context = UseTempRegister(instr->context());
LOperand* context;
LOperand* value;
if (instr->NeedsWriteBarrier()) {
context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
} else {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
return new LStoreContextSlot(context, value);
@ -1806,6 +1836,13 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
LInstruction* LChunkBuilder::DoStorePixelArrayElement(
HStorePixelArrayElement* instr) {
Abort("DoStorePixelArrayElement not implemented");
return NULL;
}
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* obj = UseFixed(instr->object(), r2);
LOperand* key = UseFixed(instr->key(), r1);
@ -1911,8 +1948,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
// There are no real uses of the arguments object (we bail out in all other
// cases).
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
// stack arguments, and any real arguments object use causes a bailout.
// So this value is never used.
return NULL;
}


@ -42,8 +42,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
V(StoreKeyed) \
V(StoreNamed) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@ -135,6 +133,7 @@ class LCodeGen;
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
@ -1058,6 +1057,18 @@ class LAddI: public LTemplateInstruction<1, 2, 0> {
};
class LPower: public LTemplateInstruction<1, 2, 0> {
public:
LPower(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(Power, "power")
DECLARE_HYDROGEN_ACCESSOR(Power)
};
class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
public:
LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
@ -1510,15 +1521,38 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
class LStoreNamed: public LTemplateInstruction<0, 2, 0> {
class LStoreNamedField: public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamed(LOperand* obj, LOperand* val) {
LStoreNamedField(LOperand* obj, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = val;
}
DECLARE_INSTRUCTION(StoreNamed)
DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* obj, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = val;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
@ -1528,40 +1562,17 @@ class LStoreNamed: public LTemplateInstruction<0, 2, 0> {
};
class LStoreNamedField: public LStoreNamed {
class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedField(LOperand* obj, LOperand* val)
: LStoreNamed(obj, val) { }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
class LStoreNamedGeneric: public LStoreNamed {
public:
LStoreNamedGeneric(LOperand* obj, LOperand* val)
: LStoreNamed(obj, val) { }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
};
class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
DECLARE_INSTRUCTION(StoreKeyed)
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
"store-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
@ -1571,23 +1582,21 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
};
class LStoreKeyedFastElement: public LStoreKeyed {
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
: LStoreKeyed(obj, key, val) {}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
"store-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
};
class LStoreKeyedGeneric: public LStoreKeyed {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
: LStoreKeyed(obj, key, val) { }
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
};


@ -26,6 +26,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arm/lithium-codegen-arm.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "code-stubs.h"
#include "stub-cache.h"
@ -54,157 +55,6 @@ class SafepointGenerator : public PostCallGenerator {
};
class LGapNode: public ZoneObject {
public:
explicit LGapNode(LOperand* operand)
: operand_(operand), resolved_(false), visited_id_(-1) { }
LOperand* operand() const { return operand_; }
bool IsResolved() const { return !IsAssigned() || resolved_; }
void MarkResolved() {
ASSERT(!IsResolved());
resolved_ = true;
}
int visited_id() const { return visited_id_; }
void set_visited_id(int id) {
ASSERT(id > visited_id_);
visited_id_ = id;
}
bool IsAssigned() const { return assigned_from_.is_set(); }
LGapNode* assigned_from() const { return assigned_from_.get(); }
void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
private:
LOperand* operand_;
SetOncePointer<LGapNode> assigned_from_;
bool resolved_;
int visited_id_;
};
LGapResolver::LGapResolver()
: nodes_(32),
identified_cycles_(4),
result_(16),
next_visited_id_(0) {
}
const ZoneList<LMoveOperands>* LGapResolver::Resolve(
const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand) {
nodes_.Rewind(0);
identified_cycles_.Rewind(0);
result_.Rewind(0);
next_visited_id_ = 0;
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) RegisterMove(move);
}
for (int i = 0; i < identified_cycles_.length(); ++i) {
ResolveCycle(identified_cycles_[i], marker_operand);
}
int unresolved_nodes;
do {
unresolved_nodes = 0;
for (int j = 0; j < nodes_.length(); j++) {
LGapNode* node = nodes_[j];
if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
AddResultMove(node->assigned_from(), node);
node->MarkResolved();
}
if (!node->IsResolved()) ++unresolved_nodes;
}
} while (unresolved_nodes > 0);
return &result_;
}
void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
AddResultMove(from->operand(), to->operand());
}
void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
result_.Add(LMoveOperands(from, to));
}
void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
ZoneList<LOperand*> cycle_operands(8);
cycle_operands.Add(marker_operand);
LGapNode* cur = start;
do {
cur->MarkResolved();
cycle_operands.Add(cur->operand());
cur = cur->assigned_from();
} while (cur != start);
cycle_operands.Add(marker_operand);
for (int i = cycle_operands.length() - 1; i > 0; --i) {
LOperand* from = cycle_operands[i];
LOperand* to = cycle_operands[i - 1];
AddResultMove(from, to);
}
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
ASSERT(a != b);
LGapNode* cur = a;
while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
cur->set_visited_id(visited_id);
cur = cur->assigned_from();
}
return cur == b;
}
bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
ASSERT(a != b);
return CanReach(a, b, next_visited_id_++);
}
void LGapResolver::RegisterMove(LMoveOperands move) {
if (move.source()->IsConstantOperand()) {
// Constant moves should be last in the machine code. Therefore add them
// first to the result set.
AddResultMove(move.source(), move.destination());
} else {
LGapNode* from = LookupNode(move.source());
LGapNode* to = LookupNode(move.destination());
if (to->IsAssigned() && to->assigned_from() == from) {
move.Eliminate();
return;
}
ASSERT(!to->IsAssigned());
if (CanReach(from, to)) {
// This introduces a cycle. Save.
identified_cycles_.Add(from);
}
to->set_assigned_from(from);
}
}
LGapNode* LGapResolver::LookupNode(LOperand* operand) {
for (int i = 0; i < nodes_.length(); ++i) {
if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
}
// No node found => create a new one.
LGapNode* result = new LGapNode(operand);
nodes_.Add(result);
return result;
}
#define __ masm()->
bool LCodeGen::GenerateCode() {
@ -294,6 +144,44 @@ bool LCodeGen::GeneratePrologue() {
}
}
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is in r1.
__ push(r1);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both r0 and cp. It replaces the context
// passed to us. It's saved in the stack and kept live in cp.
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
__ ldr(r0, MemOperand(fp, parameter_offset));
// Store it in the context.
__ mov(r1, Operand(Context::SlotOffset(slot->index())));
__ str(r0, MemOperand(cp, r1));
// Update the write barrier. This clobbers all involved
// registers, so we have to use two more registers to avoid
// clobbering cp.
__ mov(r2, Operand(cp));
__ RecordWrite(r2, Operand(r1), r3, r0);
}
}
Comment(";;; End allocate local context");
}
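The parameter_offset expression above indexes the caller's pushed arguments; a small sketch of the arithmetic (ARM32 pointer size, and the kCallerSPOffset value is an assumption standing in for StandardFrameConstants::kCallerSPOffset):

    const int kPointerSize = 4;                    // ARM32
    const int kCallerSPOffset = 2 * kPointerSize;  // assumed: saved fp + return address

    // Arguments are pushed so that parameter i sits
    // (num_parameters - 1 - i) slots above the caller SP.
    int ParameterOffsetFromFp(int i, int num_parameters) {
      return kCallerSPOffset + (num_parameters - 1 - i) * kPointerSize;
    }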
// Trace the call.
if (FLAG_trace) {
__ CallRuntime(Runtime::kTraceEnter, 0);
@ -464,7 +352,6 @@ Operand LCodeGen::ToOperand(LOperand* op) {
MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
// TODO(regis): Revisit.
ASSERT(!op->IsRegister());
ASSERT(!op->IsDoubleRegister());
ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
@ -480,6 +367,21 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
}
MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
ASSERT(op->IsDoubleStackSlot());
int index = op->index();
if (index >= 0) {
// Local or spill slot. Skip the frame pointer, function, context,
// and the first word of the double in the fixed part of the frame.
return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize);
} else {
// Incoming parameter. Skip the return address and the first word of
// the double.
return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize);
}
}
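The new ToHighMemOperand addresses the second word of a two-word double slot; a sketch of the offset arithmetic it encodes (ARM32 pointer size assumed):

    const int kPointerSize = 4;  // ARM32

    // Mirrors ToHighMemOperand above: start from the slot's base offset
    // and step one word past the first word of the double, hence the
    // trailing "+ kPointerSize".
    int HighWordOffsetFromFp(int index) {
      return index >= 0
          ? -(index + 3) * kPointerSize + kPointerSize   // local or spill slot
          : -(index - 1) * kPointerSize + kPointerSize;  // incoming parameter
    }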
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation) {
if (environment == NULL) return;
@ -751,6 +653,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
}
void LCodeGen::RecordSafepoint(int deoptimization_index) {
LPointerMap empty_pointers(RelocInfo::kNoPosition);
RecordSafepoint(&empty_pointers, deoptimization_index);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
@ -787,116 +695,7 @@ void LCodeGen::DoLabel(LLabel* label) {
void LCodeGen::DoParallelMove(LParallelMove* move) {
// d0 must always be a scratch register.
DoubleRegister dbl_scratch = d0;
LUnallocated marker_operand(LUnallocated::NONE);
Register core_scratch = scratch0();
bool destroys_core_scratch = false;
const ZoneList<LMoveOperands>* moves =
resolver_.Resolve(move->move_operands(), &marker_operand);
for (int i = moves->length() - 1; i >= 0; --i) {
LMoveOperands move = moves->at(i);
LOperand* from = move.source();
LOperand* to = move.destination();
ASSERT(!from->IsDoubleRegister() ||
!ToDoubleRegister(from).is(dbl_scratch));
ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
ASSERT(!from->IsRegister() || !ToRegister(from).is(core_scratch));
ASSERT(!to->IsRegister() || !ToRegister(to).is(core_scratch));
if (from == &marker_operand) {
if (to->IsRegister()) {
__ mov(ToRegister(to), core_scratch);
ASSERT(destroys_core_scratch);
} else if (to->IsStackSlot()) {
__ str(core_scratch, ToMemOperand(to));
ASSERT(destroys_core_scratch);
} else if (to->IsDoubleRegister()) {
__ vmov(ToDoubleRegister(to), dbl_scratch);
} else {
ASSERT(to->IsDoubleStackSlot());
// TODO(regis): Why is vstr not taking a MemOperand?
// __ vstr(dbl_scratch, ToMemOperand(to));
MemOperand to_operand = ToMemOperand(to);
__ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
}
} else if (to == &marker_operand) {
if (from->IsRegister() || from->IsConstantOperand()) {
__ mov(core_scratch, ToOperand(from));
destroys_core_scratch = true;
} else if (from->IsStackSlot()) {
__ ldr(core_scratch, ToMemOperand(from));
destroys_core_scratch = true;
} else if (from->IsDoubleRegister()) {
__ vmov(dbl_scratch, ToDoubleRegister(from));
} else {
ASSERT(from->IsDoubleStackSlot());
// TODO(regis): Why is vldr not taking a MemOperand?
// __ vldr(dbl_scratch, ToMemOperand(from));
MemOperand from_operand = ToMemOperand(from);
__ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
}
} else if (from->IsConstantOperand()) {
if (to->IsRegister()) {
__ mov(ToRegister(to), ToOperand(from));
} else {
ASSERT(to->IsStackSlot());
__ mov(ip, ToOperand(from));
__ str(ip, ToMemOperand(to));
}
} else if (from->IsRegister()) {
if (to->IsRegister()) {
__ mov(ToRegister(to), ToOperand(from));
} else {
ASSERT(to->IsStackSlot());
__ str(ToRegister(from), ToMemOperand(to));
}
} else if (to->IsRegister()) {
ASSERT(from->IsStackSlot());
__ ldr(ToRegister(to), ToMemOperand(from));
} else if (from->IsStackSlot()) {
ASSERT(to->IsStackSlot());
__ ldr(ip, ToMemOperand(from));
__ str(ip, ToMemOperand(to));
} else if (from->IsDoubleRegister()) {
if (to->IsDoubleRegister()) {
__ vmov(ToDoubleRegister(to), ToDoubleRegister(from));
} else {
ASSERT(to->IsDoubleStackSlot());
// TODO(regis): Why is vstr not taking a MemOperand?
// __ vstr(dbl_scratch, ToMemOperand(to));
MemOperand to_operand = ToMemOperand(to);
__ vstr(ToDoubleRegister(from), to_operand.rn(), to_operand.offset());
}
} else if (to->IsDoubleRegister()) {
ASSERT(from->IsDoubleStackSlot());
// TODO(regis): Why is vldr not taking a MemOperand?
// __ vldr(ToDoubleRegister(to), ToMemOperand(from));
MemOperand from_operand = ToMemOperand(from);
__ vldr(ToDoubleRegister(to), from_operand.rn(), from_operand.offset());
} else {
ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
// TODO(regis): Why is vldr not taking a MemOperand?
// __ vldr(dbl_scratch, ToMemOperand(from));
MemOperand from_operand = ToMemOperand(from);
__ vldr(dbl_scratch, from_operand.rn(), from_operand.offset());
// TODO(regis): Why is vstr not taking a MemOperand?
// __ vstr(dbl_scratch, ToMemOperand(to));
MemOperand to_operand = ToMemOperand(to);
__ vstr(dbl_scratch, to_operand.rn(), to_operand.offset());
}
}
if (destroys_core_scratch) {
__ ldr(core_scratch, MemOperand(fp, -kPointerSize));
}
LInstruction* next = GetNextInstruction();
if (next != NULL && next->IsLazyBailout()) {
int pc = masm()->pc_offset();
safepoints_.SetPcAfterGap(pc);
}
resolver_.Resolve(move);
}
@ -987,7 +786,7 @@ void LCodeGen::DoModI(LModI* instr) {
DeferredModI(LCodeGen* codegen, LModI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD);
codegen()->DoDeferredBinaryOpStub(instr_, Token::MOD);
}
private:
LModI* instr_;
@ -1016,7 +815,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ bind(&ok);
}
// Try a few common cases before using the generic stub.
// Try a few common cases before using the stub.
Label call_stub;
const int kUnfolds = 3;
// Skip if either side is negative.
@ -1044,7 +843,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ and_(result, scratch, Operand(left));
__ bind(&call_stub);
// Call the generic stub. The numbers in r0 and r1 have
// Call the stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
DeferredModI* deferred = new DeferredModI(this, instr);
__ TrySmiTag(left, &deoptimize, scratch);
@ -1070,7 +869,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
DeferredDivI(LCodeGen* codegen, LDivI* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV);
codegen()->DoDeferredBinaryOpStub(instr_, Token::DIV);
}
private:
LDivI* instr_;
@ -1123,7 +922,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
__ b(eq, &done);
// Call the generic stub. The numbers in r0 and r1 have
// Call the stub. The numbers in r0 and r1 have
// to be tagged to Smis. If that is not possible, deoptimize.
DeferredDivI* deferred = new DeferredDivI(this, instr);
@ -1145,19 +944,33 @@ void LCodeGen::DoDivI(LDivI* instr) {
template<int T>
void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op) {
void LCodeGen::DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op) {
Register left = ToRegister(instr->InputAt(0));
Register right = ToRegister(instr->InputAt(1));
__ PushSafepointRegistersAndDoubles();
GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right);
// Move left to r1 and right to r0 for the stub call.
if (left.is(r1)) {
__ Move(r0, right);
} else if (left.is(r0) && right.is(r1)) {
__ Swap(r0, r1, r2);
} else if (left.is(r0)) {
ASSERT(!right.is(r1));
__ mov(r1, r0);
__ mov(r0, right);
} else {
ASSERT(!left.is(r0) && !right.is(r0));
__ mov(r0, right);
__ mov(r1, left);
}
TypeRecordingBinaryOpStub stub(op, OVERWRITE_LEFT);
__ CallStub(&stub);
RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
0,
Safepoint::kNoDeoptimizationIndex);
// Overwrite the stored value of r0 with the result of the stub.
__ StoreToSafepointRegistersAndDoublesSlot(r0);
__ StoreToSafepointRegistersAndDoublesSlot(r0, r0);
__ PopSafepointRegistersAndDoubles();
}
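The move sequence above has to land left in r1 and right in r0 without losing a value when the sources alias the targets; a value-level sketch (ints standing in for register contents, indices 0 and 1 playing r0 and r1):

    #include <utility>

    void MoveLeftRightToR1R0(int regs[], int left, int right) {
      if (left == 1) {
        regs[0] = regs[right];        // left already sits in r1
      } else if (left == 0 && right == 1) {
        std::swap(regs[0], regs[1]);  // operands exactly swapped
      } else if (left == 0) {
        regs[1] = regs[0];            // evacuate left out of r0 first
        regs[0] = regs[right];
      } else {
        regs[0] = regs[right];        // neither source is r0: fill r0,
        regs[1] = regs[left];         // then r1 (left is elsewhere, intact)
      }
    }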
@ -1413,7 +1226,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
__ vmov(r2, r3, right);
__ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
// Move the result in the double result register.
__ vmov(ToDoubleRegister(instr->result()), r0, r1);
__ GetCFunctionDoubleResult(ToDoubleRegister(instr->result()));
// Restore r0-r3.
__ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
@ -1431,10 +1244,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
ASSERT(ToRegister(instr->InputAt(1)).is(r0));
ASSERT(ToRegister(instr->result()).is(r0));
// TODO(regis): Implement TypeRecordingBinaryOpStub and replace current
// GenericBinaryOpStub:
// TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
GenericBinaryOpStub stub(instr->op(), NO_OVERWRITE, r1, r0);
TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@ -2174,7 +1984,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
__ bind(&before_push_delta);
__ BlockConstPoolFor(kAdditionalDelta);
__ mov(temp, Operand(delta * kPointerSize));
__ StoreToSafepointRegisterSlot(temp);
__ StoreToSafepointRegisterSlot(temp, temp);
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
ASSERT_EQ(kAdditionalDelta,
masm_->InstructionsGeneratedSince(&before_push_delta));
@ -2182,7 +1992,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
// Put the result value into the result register slot and
// restore all registers.
__ StoreToSafepointRegisterSlot(result);
__ StoreToSafepointRegisterSlot(result, result);
__ PopSafepointRegisters();
}
@ -2302,17 +2112,13 @@ void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ ldr(result,
MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ ldr(result, ContextOperand(result, instr->slot_index()));
__ ldr(result, ContextOperand(context, instr->slot_index()));
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
__ ldr(context,
MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
__ str(value, ContextOperand(context, instr->slot_index()));
if (instr->needs_write_barrier()) {
int offset = Context::SlotOffset(instr->slot_index());
@ -2715,7 +2521,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
// Set the pointer to the new heap number in tmp.
if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input);
__ LoadFromSafepointRegisterSlot(input, input);
__ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
__ bind(&allocated);
@ -2726,7 +2532,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
__ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
__ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
__ str(tmp1, masm()->SafepointRegisterSlot(input));
__ StoreToSafepointRegisterSlot(tmp1, input);
__ PopSafepointRegisters();
__ bind(&done);
@ -2843,6 +2649,30 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
Register result = ToRegister(instr->result());
Register scratch1 = scratch0();
Register scratch2 = result;
EmitVFPTruncate(kRoundToNearest,
double_scratch0().low(),
input,
scratch1,
scratch2);
DeoptimizeIf(ne, instr->environment());
__ vmov(result, double_scratch0().low());
// Test for -0.
Label done;
__ cmp(result, Operand(0));
__ b(ne, &done);
__ vmov(scratch1, input.high());
__ tst(scratch1, Operand(HeapNumber::kSignMask));
DeoptimizeIf(ne, instr->environment());
__ bind(&done);
}
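The deoptimization on -0 in DoMathRound exists because an int32 result cannot carry a sign on zero; a hedged C++ analogue (std::nearbyint standing in for the VFP round-to-nearest conversion):

    #include <cmath>
    #include <cstdint>

    // Returns false where the optimized code would deoptimize: an
    // out-of-range conversion, or a result of integer 0 from a negative
    // input (including -0.0), whose sign int32 cannot represent.
    bool TryRoundToInt32(double input, int32_t* result) {
      double rounded = std::nearbyint(input);  // round to nearest
      if (rounded < INT32_MIN || rounded > INT32_MAX) return false;
      *result = static_cast<int32_t>(rounded);
      if (*result == 0 && std::signbit(input)) return false;  // -0 case
      return true;
    }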
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input));
@ -2850,6 +2680,64 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
}
void LCodeGen::DoPower(LPower* instr) {
LOperand* left = instr->InputAt(0);
LOperand* right = instr->InputAt(1);
Register scratch = scratch0();
DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
if (exponent_type.IsDouble()) {
// Prepare arguments and call C function.
__ PrepareCallCFunction(4, scratch);
__ vmov(r0, r1, ToDoubleRegister(left));
__ vmov(r2, r3, ToDoubleRegister(right));
__ CallCFunction(ExternalReference::power_double_double_function(), 4);
} else if (exponent_type.IsInteger32()) {
ASSERT(ToRegister(right).is(r0));
// Prepare arguments and call C function.
__ PrepareCallCFunction(4, scratch);
__ mov(r2, ToRegister(right));
__ vmov(r0, r1, ToDoubleRegister(left));
__ CallCFunction(ExternalReference::power_double_int_function(), 4);
} else {
ASSERT(exponent_type.IsTagged());
ASSERT(instr->hydrogen()->left()->representation().IsDouble());
Register right_reg = ToRegister(right);
// Check for smi on the right hand side.
Label non_smi, call;
__ JumpIfNotSmi(right_reg, &non_smi);
// Untag smi and convert it to a double.
__ SmiUntag(right_reg);
SwVfpRegister single_scratch = double_scratch0().low();
__ vmov(single_scratch, right_reg);
__ vcvt_f64_s32(result_reg, single_scratch);
__ jmp(&call);
// Heap number map check.
__ bind(&non_smi);
__ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
DeoptimizeIf(ne, instr->environment());
int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
__ add(scratch, right_reg, Operand(value_offset));
__ vldr(result_reg, scratch, 0);
// Prepare arguments and call C function.
__ bind(&call);
__ PrepareCallCFunction(4, scratch);
__ vmov(r0, r1, ToDoubleRegister(left));
__ vmov(r2, r3, result_reg);
__ CallCFunction(ExternalReference::power_double_double_function(), 4);
}
// Store the result in the result register.
__ GetCFunctionDoubleResult(result_reg);
}
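// For reference, a hedged sketch of what the redirected C helpers compute
// (the real definitions live in assembler.cc; exact special-case handling
// may differ). power_double_int uses square-and-multiply on the magnitude
// of the exponent:
#include <cmath>

static double power_double_double(double x, double y) {
  return std::pow(x, y);
}

static double power_double_int(double x, int y) {
  double result = 1.0;
  double base = x;
  int exponent = (y < 0) ? -y : y;
  while (exponent != 0) {
    if (exponent & 1) result *= base;  // odd bit: fold the base in
    base *= base;                      // square the base each step
    exponent >>= 1;
  }
  return (y < 0) ? 1.0 / result : result;
}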
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
switch (instr->op()) {
case kMathAbs:
@ -2858,6 +2746,9 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathFloor:
DoMathFloor(instr);
break;
case kMathRound:
DoMathRound(instr);
break;
case kMathSqrt:
DoMathSqrt(instr);
break;
@ -3157,8 +3048,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ AbortIfNotSmi(r0);
}
__ SmiUntag(r0);
MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result);
__ str(r0, result_stack_slot);
__ StoreToSafepointRegisterSlot(r0, result);
__ PopSafepointRegisters();
}
@ -3239,9 +3129,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ mov(ip, Operand(0));
int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
__ str(ip, MemOperand(sp, reg_stack_index * kPointerSize));
__ StoreToSafepointRegisterSlot(ip, reg);
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
@ -3252,7 +3140,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
__ bind(&done);
__ sub(ip, reg, Operand(kHeapObjectTag));
__ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
__ str(reg, MemOperand(sp, reg_stack_index * kPointerSize));
__ StoreToSafepointRegisterSlot(reg, reg);
__ PopSafepointRegisters();
}
@ -3297,8 +3185,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
int reg_stack_index = __ SafepointRegisterStackIndex(reg.code());
__ str(r0, MemOperand(sp, reg_stack_index * kPointerSize));
__ StoreToSafepointRegisterSlot(r0, reg);
__ PopSafepointRegisters();
}


@ -29,7 +29,7 @@
#define V8_ARM_LITHIUM_CODEGEN_ARM_H_
#include "arm/lithium-arm.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "deoptimizer.h"
#include "safepoint-table.h"
#include "scopes.h"
@ -39,31 +39,8 @@ namespace internal {
// Forward declarations.
class LDeferredCode;
class LGapNode;
class SafepointGenerator;
class LGapResolver BASE_EMBEDDED {
public:
LGapResolver();
const ZoneList<LMoveOperands>* Resolve(const ZoneList<LMoveOperands>* moves,
LOperand* marker_operand);
private:
LGapNode* LookupNode(LOperand* operand);
bool CanReach(LGapNode* a, LGapNode* b, int visited_id);
bool CanReach(LGapNode* a, LGapNode* b);
void RegisterMove(LMoveOperands move);
void AddResultMove(LOperand* from, LOperand* to);
void AddResultMove(LGapNode* from, LGapNode* to);
void ResolveCycle(LGapNode* start, LOperand* marker_operand);
ZoneList<LGapNode*> nodes_;
ZoneList<LGapNode*> identified_cycles_;
ZoneList<LMoveOperands> result_;
int next_visited_id_;
};
class LCodeGen BASE_EMBEDDED {
public:
LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
@ -79,10 +56,35 @@ class LCodeGen BASE_EMBEDDED {
scope_(chunk->graph()->info()->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1) {
osr_pc_offset_(-1),
resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
// LOperand is loaded into scratch, unless already a register.
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
DoubleRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DoubleRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Returns a MemOperand pointing to the high word of a DoubleStackSlot.
MemOperand ToHighMemOperand(LOperand* op) const;
// Try to generate code for the entire chunk, but it may fail if the
// chunk contains constructs we cannot handle. Returns true if the
// code generation attempt succeeded.
@ -94,8 +96,8 @@ class LCodeGen BASE_EMBEDDED {
// Deferred code support.
template<int T>
void DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op);
void DoDeferredBinaryOpStub(LTemplateInstruction<1, 2, T>* instr,
Token::Value op);
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
@ -136,7 +138,6 @@ class LCodeGen BASE_EMBEDDED {
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
MacroAssembler* masm() const { return masm_; }
Register scratch0() { return r9; }
DwVfpRegister double_scratch0() { return d0; }
@ -202,24 +203,6 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
// LOperand must be a register.
Register ToRegister(LOperand* op) const;
// LOperand is loaded into scratch, unless already a register.
Register EmitLoadRegister(LOperand* op, Register scratch);
// LOperand must be a double register.
DoubleRegister ToDoubleRegister(LOperand* op) const;
// LOperand is loaded into dbl_scratch, unless already a double register.
DoubleRegister EmitLoadDoubleRegister(LOperand* op,
SwVfpRegister flt_scratch,
DoubleRegister dbl_scratch);
int ToInteger32(LConstantOperand* op) const;
Operand ToOperand(LOperand* op);
MemOperand ToMemOperand(LOperand* op) const;
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
@ -229,6 +212,7 @@ class LCodeGen BASE_EMBEDDED {
Register scratch1,
Register scratch2);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
@ -237,6 +221,7 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepoint(int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);


@ -0,0 +1,303 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "arm/lithium-gap-resolver-arm.h"
#include "arm/lithium-codegen-arm.h"
namespace v8 {
namespace internal {
static const Register kSavedValueRegister = { 9 };
static const DoubleRegister kSavedDoubleValueRegister = { 0 };
LGapResolver::LGapResolver(LCodeGen* owner)
: cgen_(owner), moves_(32), root_index_(0), in_cycle_(false),
saved_destination_(NULL) { }
void LGapResolver::Resolve(LParallelMove* parallel_move) {
ASSERT(moves_.is_empty());
// Build up a worklist of moves.
BuildInitialMoveList(parallel_move);
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands move = moves_[i];
// Skip constants to perform them last. They don't block other moves
// and skipping such moves with register destinations keeps those
// registers free for the whole algorithm.
if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
root_index_ = i; // Any cycle is found by reaching this move again.
PerformMove(i);
if (in_cycle_) {
RestoreValue();
}
}
}
// Perform the moves with constant sources.
for (int i = 0; i < moves_.length(); ++i) {
if (!moves_[i].IsEliminated()) {
ASSERT(moves_[i].source()->IsConstantOperand());
EmitMove(i);
}
}
moves_.Rewind(0);
}
void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
// Perform a linear sweep of the moves to add them to the initial list of
// moves to perform, ignoring any move that is redundant (the source is
// the same as the destination, the destination is ignored and
// unallocated, or the move was already eliminated).
const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
for (int i = 0; i < moves->length(); ++i) {
LMoveOperands move = moves->at(i);
if (!move.IsRedundant()) moves_.Add(move);
}
Verify();
}
void LGapResolver::PerformMove(int index) {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We
// mark a move as "pending" on entry to PerformMove in order to detect
// cycles in the move graph.
// We can only find a cycle, when doing a depth-first traversal of moves,
// by encountering the starting move again. So by spilling the source of
// the starting move, we break the cycle. All moves are then unblocked,
// and the starting move is completed by writing the spilled value to
// its destination. All other moves from the spilled source have been
// completed prior to breaking the cycle.
// An additional complication is that moves to MemOperands with large
// offsets (more than 1K or 4K) require us to spill this spilled value to
// the stack, to free up the register.
ASSERT(!moves_[index].IsPending());
ASSERT(!moves_[index].IsRedundant());
// Clear this move's destination to indicate a pending move. The actual
// destination is saved in a stack allocated local. Multiple moves can
// be pending because this function is recursive.
ASSERT(moves_[index].source() != NULL); // Or else it will look eliminated.
LOperand* destination = moves_[index].destination();
moves_[index].set_destination(NULL);
// Perform a depth-first traversal of the move graph to resolve
// dependencies. Any unperformed, unpending move with a source the same
// as this one's destination blocks this one so recursively perform all
// such moves.
for (int i = 0; i < moves_.length(); ++i) {
LMoveOperands other_move = moves_[i];
if (other_move.Blocks(destination) && !other_move.IsPending()) {
PerformMove(i);
// If there is a blocking, pending move it must be moves_[root_index_]
// and all other moves with the same source as moves_[root_index_] are
// successfully executed (because they are cycle-free) by this loop.
}
}
// We are about to resolve this move and don't need it marked as
// pending, so restore its destination.
moves_[index].set_destination(destination);
// The move may be blocked on a pending move, which must be the starting move.
// In this case, we have a cycle, and we save the source of this move to
// a scratch register to break it.
LMoveOperands other_move = moves_[root_index_];
if (other_move.Blocks(destination)) {
ASSERT(other_move.IsPending());
BreakCycle(index);
return;
}
// This move is no longer blocked.
EmitMove(index);
}
void LGapResolver::Verify() {
#ifdef ENABLE_SLOW_ASSERTS
// No operand should be the destination for more than one move.
for (int i = 0; i < moves_.length(); ++i) {
LOperand* destination = moves_[i].destination();
for (int j = i + 1; j < moves_.length(); ++j) {
SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
}
}
#endif
}
#define __ ACCESS_MASM(cgen_->masm())
void LGapResolver::BreakCycle(int index) {
// We save in a register the value that should end up in the source of
// moves_[root_index]. After performing all moves in the tree rooted
// in that move, we save the value to that source.
ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
ASSERT(!in_cycle_);
in_cycle_ = true;
LOperand* source = moves_[index].source();
saved_destination_ = moves_[index].destination();
if (source->IsRegister()) {
__ mov(kSavedValueRegister, cgen_->ToRegister(source));
} else if (source->IsStackSlot()) {
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
__ vmov(kSavedDoubleValueRegister, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
__ vldr(kSavedDoubleValueRegister, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
}
// This move will be done by restoring the saved value to the destination.
moves_[index].Eliminate();
}
void LGapResolver::RestoreValue() {
ASSERT(in_cycle_);
ASSERT(saved_destination_ != NULL);
// Spilled value is in kSavedValueRegister or kSavedDoubleValueRegister.
if (saved_destination_->IsRegister()) {
__ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(saved_destination_),
kSavedDoubleValueRegister);
} else if (saved_destination_->IsDoubleStackSlot()) {
__ vstr(kSavedDoubleValueRegister,
cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
}
in_cycle_ = false;
saved_destination_ = NULL;
}
void LGapResolver::EmitMove(int index) {
LOperand* source = moves_[index].source();
LOperand* destination = moves_[index].destination();
// Dispatch on the source and destination operand kinds. Not all
// combinations are possible.
if (source->IsRegister()) {
Register source_register = cgen_->ToRegister(source);
if (destination->IsRegister()) {
__ mov(cgen_->ToRegister(destination), source_register);
} else {
ASSERT(destination->IsStackSlot());
__ str(source_register, cgen_->ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsRegister()) {
__ ldr(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsUint12Encodable()) {
// ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
__ vldr(kSavedDoubleValueRegister.low(), source_operand);
__ vstr(kSavedDoubleValueRegister.low(), destination_operand);
} else {
__ ldr(ip, source_operand);
__ str(ip, destination_operand);
}
} else {
__ ldr(kSavedValueRegister, source_operand);
__ str(kSavedValueRegister, destination_operand);
}
}
} else if (source->IsConstantOperand()) {
Operand source_operand = cgen_->ToOperand(source);
if (destination->IsRegister()) {
__ mov(cgen_->ToRegister(destination), source_operand);
} else {
ASSERT(destination->IsStackSlot());
ASSERT(!in_cycle_); // Constant moves happen after all cycles are gone.
MemOperand destination_operand = cgen_->ToMemOperand(destination);
__ mov(kSavedValueRegister, source_operand);
__ str(kSavedValueRegister, destination_operand);
}
} else if (source->IsDoubleRegister()) {
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
} else {
ASSERT(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
__ vstr(source_register, destination_operand);
}
} else if (source->IsDoubleStackSlot()) {
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {
ASSERT(destination->IsDoubleStackSlot());
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
// kSavedDoubleValueRegister was used to break the cycle,
// but kSavedValueRegister is free.
MemOperand source_high_operand =
cgen_->ToHighMemOperand(source);
MemOperand destination_high_operand =
cgen_->ToHighMemOperand(destination);
__ ldr(kSavedValueRegister, source_operand);
__ str(kSavedValueRegister, destination_operand);
__ ldr(kSavedValueRegister, source_high_operand);
__ str(kSavedValueRegister, destination_high_operand);
} else {
__ vldr(kSavedDoubleValueRegister, source_operand);
__ vstr(kSavedDoubleValueRegister, destination_operand);
}
}
} else {
UNREACHABLE();
}
moves_[index].Eliminate();
}
#undef __
} } // namespace v8::internal
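// A toy illustration (plain C++, invented names) of the cycle-breaking
// strategy implemented above: the parallel move {r0 <- r1, r1 <- r0} is a
// cycle, so BreakCycle spills one source into a scratch location and
// RestoreValue later writes it to the saved destination.
#include <cassert>

int main() {
  int r0 = 1, r1 = 2;
  int scratch = r0;  // BreakCycle: save the source of the root move
  r0 = r1;           // the remaining move is now unblocked
  r1 = scratch;      // RestoreValue: complete the eliminated move
  assert(r0 == 2 && r1 == 1);
  return 0;
}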


@ -0,0 +1,84 @@
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#define V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
#include "v8.h"
#include "lithium.h"
namespace v8 {
namespace internal {
class LCodeGen;
class LGapResolver;
class LGapResolver BASE_EMBEDDED {
public:
explicit LGapResolver(LCodeGen* owner);
// Resolve a set of parallel moves, emitting assembler instructions.
void Resolve(LParallelMove* parallel_move);
private:
// Build the initial list of moves.
void BuildInitialMoveList(LParallelMove* parallel_move);
// Perform the move at the moves_ index in question (possibly requiring
// other moves to satisfy dependencies).
void PerformMove(int index);
// If a cycle is found in the series of moves, save the blocking value to
// a scratch register. The cycle must be found by hitting the root of the
// depth-first search.
void BreakCycle(int index);
// After a cycle has been resolved, restore the value from the scratch
// register to its proper destination.
void RestoreValue();
// Emit a move and remove it from the move graph.
void EmitMove(int index);
// Verify the move list before performing moves.
void Verify();
LCodeGen* cgen_;
// List of moves not yet resolved.
ZoneList<LMoveOperands> moves_;
int root_index_;
bool in_cycle_;
LOperand* saved_destination_;
};
} } // namespace v8::internal
#endif // V8_ARM_LITHIUM_GAP_RESOLVER_ARM_H_


@ -485,18 +485,19 @@ void MacroAssembler::PopSafepointRegistersAndDoubles() {
PopSafepointRegisters();
}
void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register reg) {
str(reg, SafepointRegistersAndDoublesSlot(reg));
void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
Register dst) {
str(src, SafepointRegistersAndDoublesSlot(dst));
}
void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) {
str(reg, SafepointRegisterSlot(reg));
void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
str(src, SafepointRegisterSlot(dst));
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register reg) {
ldr(reg, SafepointRegisterSlot(reg));
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
ldr(dst, SafepointRegisterSlot(src));
}
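// A toy model (plain C++, invented names) of the src/dst split above: the
// value to store and the register whose spill slot receives it are now
// separate parameters instead of one register playing both roles.
#include <cassert>

struct ToyFrame {
  int slots[16];  // one safepoint spill slot per register code
  void StoreToSlot(int src_value, int dst_reg_code) {
    slots[dst_reg_code] = src_value;
  }
  int LoadFromSlot(int src_reg_code) const { return slots[src_reg_code]; }
};

int main() {
  ToyFrame frame = {};
  frame.StoreToSlot(42, 5);  // keep a live value in r5's safepoint slot
  assert(frame.LoadFromSlot(5) == 42);
  return 0;
}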
@ -745,6 +746,14 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
}
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
#if !defined(USE_ARM_EABI)
UNREACHABLE();
#else
vmov(dst, r0, r1);
#endif
}
void MacroAssembler::InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
@ -2154,11 +2163,22 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
ldr(dst, MemOperand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
ldr(dst, FieldMemOperand(dst, JSFunction::kContextOffset));
}
// The context may be an intermediate context, not a function context.
ldr(dst, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
} else { // Slot is in the current function context.
// The context may be an intermediate context, not a function context.
ldr(dst, MemOperand(cp, Context::SlotOffset(Context::FCONTEXT_INDEX)));
} else {
// Slot is in the current function context. Move it into the
// destination register in case we store into it (the write barrier
// cannot be allowed to destroy the context in cp).
mov(dst, cp);
}
// We should not have found a 'with' context by walking the context chain
// (i.e., the static scope chain and runtime context chain do not agree).
// A variable occurring in such a scope should have slot type LOOKUP and
// not CONTEXT.
if (FLAG_debug_code) {
ldr(ip, MemOperand(dst, Context::SlotOffset(Context::FCONTEXT_INDEX)));
cmp(dst, ip);
Check(eq, "Yo dawg, I heard you liked function contexts "
"so I put function contexts in all your contexts");
}
}
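// A toy sketch (plain C++, invented names) of the debug check added above:
// a context is a function context exactly when its FCONTEXT slot points
// back to itself, which is what the ldr/cmp/Check sequence verifies.
#include <cassert>

struct ToyContext {
  ToyContext* fcontext;  // the function context this context belongs to
};

int main() {
  ToyContext function_ctx = { &function_ctx };     // self-referential
  ToyContext with_ctx = { &function_ctx };         // intermediate context
  assert(function_ctx.fcontext == &function_ctx);  // passes the Check
  assert(with_ctx.fcontext != &with_ctx);          // would trip the Check
  return 0;
}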


@ -240,12 +240,13 @@ class MacroAssembler: public Assembler {
void PopSafepointRegisters();
void PushSafepointRegistersAndDoubles();
void PopSafepointRegistersAndDoubles();
void StoreToSafepointRegisterSlot(Register reg);
void StoreToSafepointRegistersAndDoublesSlot(Register reg);
void LoadFromSafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
static MemOperand SafepointRegisterSlot(Register reg);
static MemOperand SafepointRegistersAndDoublesSlot(Register reg);
// Store value in register src in the safepoint stack slot for
// register dst.
void StoreToSafepointRegisterSlot(Register src, Register dst);
void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
// Load the value of the src register from its safepoint stack slot
// into register dst.
void LoadFromSafepointRegisterSlot(Register dst, Register src);
// Load two consecutive registers with two consecutive memory locations.
void Ldrd(Register dst1,
@ -683,6 +684,8 @@ class MacroAssembler: public Assembler {
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context.
// stack_space - space to be unwound on exit (includes the call js
@ -883,10 +886,19 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2);
// Compute memory operands for safepoint stack slots.
static int SafepointRegisterStackIndex(int reg_code);
MemOperand SafepointRegisterSlot(Register reg);
MemOperand SafepointRegistersAndDoublesSlot(Register reg);
bool generating_stub_;
bool allow_stub_calls_;
// This handle will be patched with the code object on installation.
Handle<Object> code_object_;
// Needs access to SafepointRegisterStackIndex for optimized frame
// traversal.
friend class OptimizedFrame;
};


@ -2332,8 +2332,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
if (!function->IsBuiltin()) {
// Calling non-builtins with a value as receiver requires boxing.
if (!function->IsBuiltin() && !function_info->strict_mode()) {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
} else {
// Check that the object is a two-byte string or a symbol.
@ -2348,8 +2349,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case NUMBER_CHECK: {
if (!function->IsBuiltin()) {
// Calling non-builtins with a value as receiver requires boxing.
if (!function->IsBuiltin() && !function_info->strict_mode()) {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
} else {
Label fast;
@ -2369,8 +2371,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
}
case BOOLEAN_CHECK: {
if (!function->IsBuiltin()) {
// Calling non-builtins with a value as receiver requires boxing.
if (!function->IsBuiltin() && !function_info->strict_mode()) {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
} else {
Label fast;


@ -228,6 +228,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteTaggedPC(pc_delta, kCodeTargetTag);
ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for data.
intptr_t data_delta = rinfo->data() - last_data_;
@ -251,6 +252,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
WriteExtraTaggedPC(pc_delta, kPCJumpTag);
WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
last_data_ = rinfo->data();
ASSERT(begin_pos - pos_ == RelocInfo::kRelocCommentSize);
} else {
// For all other modes we simply use the mode as the extra tag.
// None of these modes need a data component.
@ -850,12 +852,14 @@ double power_double_double(double x, double y) {
ExternalReference ExternalReference::power_double_double_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double)));
return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double),
FP_RETURN_CALL));
}
ExternalReference ExternalReference::power_double_int_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int)));
return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int),
FP_RETURN_CALL));
}


@ -184,6 +184,14 @@ class RelocInfo BASE_EMBEDDED {
// we do not normally record relocation info.
static const char* kFillerCommentString;
// The size of a comment is equal to three bytes for the extra tagged pc +
// the tag for the data, and kPointerSize for the actual pointer to the
// comment.
static const int kRelocCommentSize = 3 + kPointerSize;
// The maximum size for a call instruction including pc-jump.
static const int kMaxCallSize = 6;
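// Worked numbers for kRelocCommentSize (assuming the usual pointer widths
// in this tree): with kPointerSize == 4 on ia32/ARM it is 3 + 4 == 7 bytes;
// with kPointerSize == 8 on x64 it is 3 + 8 == 11 bytes.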
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.


@ -261,10 +261,8 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
Handle<SharedFunctionInfo> shared = info->shared_info();
shared->EnableDeoptimizationSupport(*unoptimized.code());
// The existing unoptimized code was replaced with the new one.
Compiler::RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
Handle<String>(shared->DebugName()),
shared->start_position(),
&unoptimized);
Compiler::RecordFunctionCompilation(
Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
}
}
@ -273,7 +271,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// optimizable marker in the code object and optimize anyway. This
// is safe as long as the unoptimized code has deoptimization
// support.
ASSERT(FLAG_always_opt || info->shared_info()->code()->optimizable());
ASSERT(FLAG_always_opt || code->optimizable());
ASSERT(info->shared_info()->has_deoptimization_support());
if (FLAG_trace_hydrogen) {
@ -283,8 +281,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
TypeFeedbackOracle oracle(
Handle<Code>(info->shared_info()->code()),
Handle<Context>(info->closure()->context()->global_context()));
code, Handle<Context>(info->closure()->context()->global_context()));
HGraphBuilder builder(&oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph(info);
@ -294,9 +291,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
if (graph != NULL && FLAG_build_lithium) {
Handle<Code> code = graph->Compile();
if (!code.is_null()) {
info->SetCode(code);
Handle<Code> optimized_code = graph->Compile();
if (!optimized_code.is_null()) {
info->SetCode(optimized_code);
FinishOptimization(info->closure(), start);
return true;
}
@ -415,13 +412,25 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
return Handle<SharedFunctionInfo>::null();
}
// Allocate function.
ASSERT(!info->code().is_null());
Handle<SharedFunctionInfo> result =
Factory::NewSharedFunctionInfo(
lit->name(),
lit->materialized_literal_count(),
info->code(),
SerializedScopeInfo::Create(info->scope()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
if (script->name()->IsString()) {
PROFILE(CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
String::cast(script->name())));
GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
script,
@ -432,21 +441,11 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
""));
*result,
Heap::empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code()));
}
// Allocate function.
Handle<SharedFunctionInfo> result =
Factory::NewSharedFunctionInfo(
lit->name(),
lit->materialized_literal_count(),
info->code(),
SerializedScopeInfo::Create(info->scope()));
ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
Compiler::SetFunctionInfo(result, lit, true, script);
// Hint to the runtime system used when allocating space for initial
// property space by setting the expected number of properties for
// the instances of the function.
@ -613,10 +612,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
ASSERT(!info->code().is_null());
Handle<Code> code = info->code();
Handle<JSFunction> function = info->closure();
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG,
Handle<String>(shared->DebugName()),
shared->start_position(),
info);
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
if (info->IsOptimizing()) {
function->ReplaceCode(*code);
@ -724,10 +720,6 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
ASSERT(!info.code().is_null());
// Function compilation complete.
RecordFunctionCompilation(Logger::FUNCTION_TAG,
literal->debug_name(),
literal->start_position(),
&info);
scope_info = SerializedScopeInfo::Create(info.scope());
}
@ -738,6 +730,7 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
info.code(),
scope_info);
SetFunctionInfo(result, literal, false, script);
RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
result->set_allows_lazy_compilation(allow_lazy);
// Set the expected number of properties for instances and return
@ -776,28 +769,31 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
Handle<String> name,
int start_position,
CompilationInfo* info) {
CompilationInfo* info,
Handle<SharedFunctionInfo> shared) {
// The SharedFunctionInfo is passed separately, because a CompilationInfo
// created from a Script object does not carry one.
// Log the code generation. If source information is available include
// script name and line number. Check explicitly whether logging is
// enabled as finding the line number is not free.
if (Logger::is_logging() ||
CpuProfiler::is_profiling()) {
if (Logger::is_logging() || CpuProfiler::is_profiling()) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
if (*code == Builtins::builtin(Builtins::LazyCompile)) return;
if (script->name()->IsString()) {
int line_num = GetScriptLineNumber(script, start_position) + 1;
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
USE(line_num);
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*name,
*shared,
String::cast(script->name()),
line_num));
} else {
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*name));
*shared,
shared->DebugName()));
}
}


@ -265,9 +265,8 @@ class Compiler : public AllStatic {
#endif
static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
Handle<String> name,
int start_position,
CompilationInfo* info);
CompilationInfo* info,
Handle<SharedFunctionInfo> shared);
};


@ -41,6 +41,9 @@ namespace internal {
void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->AddCode(start, entry, size);
if (sfi_address != NULL) {
entry->set_shared_id(code_map->GetSFITag(sfi_address));
}
}
@ -54,8 +57,8 @@ void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
void CodeAliasEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->AddAlias(start, entry, code_start);
void SFIMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->MoveCode(from, to);
}


@ -53,13 +53,7 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator)
ticks_buffer_(sizeof(TickSampleEventRecord),
kTickSamplesBufferChunkSize,
kTickSamplesBufferChunksCount),
enqueue_order_(0),
known_functions_(new HashMap(AddressesMatch)) {
}
ProfilerEventsProcessor::~ProfilerEventsProcessor() {
delete known_functions_;
enqueue_order_(0) {
}
@ -75,6 +69,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, prefix, name);
rec->size = 1;
rec->sfi_address = NULL;
events_buffer_.Enqueue(evt_rec);
}
@ -84,7 +79,8 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
String* resource_name,
int line_number,
Address start,
unsigned size) {
unsigned size,
Address sfi_address) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@ -93,6 +89,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
rec->size = size;
rec->sfi_address = sfi_address;
events_buffer_.Enqueue(evt_rec);
}
@ -109,6 +106,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, name);
rec->size = size;
rec->sfi_address = NULL;
events_buffer_.Enqueue(evt_rec);
}
@ -125,6 +123,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, args_count);
rec->size = size;
rec->sfi_address = NULL;
events_buffer_.Enqueue(evt_rec);
}
@ -150,57 +149,14 @@ void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
}
void ProfilerEventsProcessor::FunctionCreateEvent(Address alias,
Address start,
int security_token_id) {
void ProfilerEventsProcessor::SFIMoveEvent(Address from, Address to) {
CodeEventsContainer evt_rec;
CodeAliasEventRecord* rec = &evt_rec.CodeAliasEventRecord_;
rec->type = CodeEventRecord::CODE_ALIAS;
SFIMoveEventRecord* rec = &evt_rec.SFIMoveEventRecord_;
rec->type = CodeEventRecord::SFI_MOVE;
rec->order = ++enqueue_order_;
rec->start = alias;
rec->entry = generator_->NewCodeEntry(security_token_id);
rec->code_start = start;
rec->from = from;
rec->to = to;
events_buffer_.Enqueue(evt_rec);
known_functions_->Lookup(alias, AddressHash(alias), true);
}
void ProfilerEventsProcessor::FunctionMoveEvent(Address from, Address to) {
CodeMoveEvent(from, to);
if (IsKnownFunction(from)) {
known_functions_->Remove(from, AddressHash(from));
known_functions_->Lookup(to, AddressHash(to), true);
}
}
void ProfilerEventsProcessor::FunctionDeleteEvent(Address from) {
CodeDeleteEvent(from);
known_functions_->Remove(from, AddressHash(from));
}
bool ProfilerEventsProcessor::IsKnownFunction(Address start) {
HashMap::Entry* entry =
known_functions_->Lookup(start, AddressHash(start), false);
return entry != NULL;
}
void ProfilerEventsProcessor::ProcessMovedFunctions() {
for (int i = 0; i < moved_functions_.length(); ++i) {
JSFunction* function = moved_functions_[i];
CpuProfiler::FunctionCreateEvent(function);
}
moved_functions_.Clear();
}
void ProfilerEventsProcessor::RememberMovedFunction(JSFunction* function) {
moved_functions_.Add(function);
}
@ -227,13 +183,12 @@ void ProfilerEventsProcessor::AddCurrentStack() {
TickSample* sample = &record.sample;
sample->state = Top::current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
sample->tos = NULL;
sample->frames_count = 0;
for (StackTraceFrameIterator it;
!it.done() && sample->frames_count < TickSample::kMaxFramesCount;
it.Advance()) {
JavaScriptFrame* frame = it.frame();
sample->stack[sample->frames_count++] =
reinterpret_cast<Address>(frame->function());
sample->stack[sample->frames_count++] = it.frame()->pc();
}
record.order = enqueue_order_;
ticks_from_vm_buffer_.Enqueue(record);
@ -393,20 +348,38 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Heap::empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize());
code->ExecutableSize(),
NULL);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name,
String* source, int line) {
Code* code,
SharedFunctionInfo* shared,
String* name) {
singleton_->processor_->CodeCreateEvent(
tag,
name,
Heap::empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
shared->address());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* source, int line) {
singleton_->processor_->CodeCreateEvent(
tag,
shared->DebugName(),
source,
line,
code->address(),
code->ExecutableSize());
code->ExecutableSize(),
shared->address());
}
@ -430,44 +403,8 @@ void CpuProfiler::CodeDeleteEvent(Address from) {
}
void CpuProfiler::FunctionCreateEvent(JSFunction* function) {
int security_token_id = TokenEnumerator::kNoSecurityToken;
if (function->unchecked_context()->IsContext()) {
security_token_id = singleton_->token_enumerator_->GetTokenId(
function->context()->global_context()->security_token());
}
singleton_->processor_->FunctionCreateEvent(
function->address(),
function->shared()->code()->address(),
security_token_id);
}
void CpuProfiler::ProcessMovedFunctions() {
singleton_->processor_->ProcessMovedFunctions();
}
void CpuProfiler::FunctionCreateEventFromMove(JSFunction* function) {
// This function is called from GC iterators (during Scavenge,
// MC, and MS), so marking bits can be set on objects. That's
// why unchecked accessors are used here.
// The same function can be reported several times.
if (function->unchecked_code() == Builtins::builtin(Builtins::LazyCompile)
|| singleton_->processor_->IsKnownFunction(function->address())) return;
singleton_->processor_->RememberMovedFunction(function);
}
void CpuProfiler::FunctionMoveEvent(Address from, Address to) {
singleton_->processor_->FunctionMoveEvent(from, to);
}
void CpuProfiler::FunctionDeleteEvent(Address from) {
singleton_->processor_->FunctionDeleteEvent(from);
void CpuProfiler::SFIMoveEvent(Address from, Address to) {
singleton_->processor_->SFIMoveEvent(from, to);
}
@ -539,7 +476,6 @@ void CpuProfiler::StartProcessorIfNotStarted() {
FLAG_log_code = saved_log_code_flag;
}
Logger::LogCompiledFunctions();
Logger::LogFunctionObjects();
Logger::LogAccessorCallbacks();
}
// Enable stack sampling.


@ -50,7 +50,7 @@ class TokenEnumerator;
V(CODE_CREATION, CodeCreateEventRecord) \
V(CODE_MOVE, CodeMoveEventRecord) \
V(CODE_DELETE, CodeDeleteEventRecord) \
V(CODE_ALIAS, CodeAliasEventRecord)
V(SFI_MOVE, SFIMoveEventRecord)
class CodeEventRecord {
@ -73,6 +73,7 @@ class CodeCreateEventRecord : public CodeEventRecord {
Address start;
CodeEntry* entry;
unsigned size;
Address sfi_address;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@ -95,11 +96,10 @@ class CodeDeleteEventRecord : public CodeEventRecord {
};
class CodeAliasEventRecord : public CodeEventRecord {
class SFIMoveEventRecord : public CodeEventRecord {
public:
Address start;
CodeEntry* entry;
Address code_start;
Address from;
Address to;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@ -134,7 +134,7 @@ class TickSampleEventRecord BASE_EMBEDDED {
class ProfilerEventsProcessor : public Thread {
public:
explicit ProfilerEventsProcessor(ProfileGenerator* generator);
virtual ~ProfilerEventsProcessor();
virtual ~ProfilerEventsProcessor() {}
// Thread control.
virtual void Run();
@ -148,7 +148,8 @@ class ProfilerEventsProcessor : public Thread {
void CodeCreateEvent(Logger::LogEventsAndTags tag,
String* name,
String* resource_name, int line_number,
Address start, unsigned size);
Address start, unsigned size,
Address sfi_address);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
const char* name,
Address start, unsigned size);
@ -157,17 +158,12 @@ class ProfilerEventsProcessor : public Thread {
Address start, unsigned size);
void CodeMoveEvent(Address from, Address to);
void CodeDeleteEvent(Address from);
void FunctionCreateEvent(Address alias, Address start, int security_token_id);
void FunctionMoveEvent(Address from, Address to);
void FunctionDeleteEvent(Address from);
void SFIMoveEvent(Address from, Address to);
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
Address start, unsigned size);
// Puts current stack into tick sample events buffer.
void AddCurrentStack();
bool IsKnownFunction(Address start);
void ProcessMovedFunctions();
void RememberMovedFunction(JSFunction* function);
// Tick sample events are filled directly in the buffer of the circular
// queue (because the structure is of fixed width, but usually not all
@ -188,13 +184,6 @@ class ProfilerEventsProcessor : public Thread {
bool ProcessTicks(unsigned dequeue_order);
INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));
INLINE(static bool AddressesMatch(void* key1, void* key2)) {
return key1 == key2;
}
INLINE(static uint32_t AddressHash(Address addr)) {
return ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)));
}
ProfileGenerator* generator_;
bool running_;
@ -202,10 +191,6 @@ class ProfilerEventsProcessor : public Thread {
SamplingCircularQueue ticks_buffer_;
UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
unsigned enqueue_order_;
// Used from the VM thread.
HashMap* known_functions_;
List<JSFunction*> moved_functions_;
};
} } // namespace v8::internal
@ -251,23 +236,22 @@ class CpuProfiler {
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name,
Code* code,
SharedFunctionInfo* shared,
String* name);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* source, int line);
static void CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count);
static void CodeMovingGCEvent() {}
static void CodeMoveEvent(Address from, Address to);
static void CodeDeleteEvent(Address from);
static void FunctionCreateEvent(JSFunction* function);
// Reports function creation in case we had missed it (e.g.
// if it was created from compiled code).
static void FunctionCreateEventFromMove(JSFunction* function);
static void FunctionMoveEvent(Address from, Address to);
static void FunctionDeleteEvent(Address from);
static void GetterCallbackEvent(String* name, Address entry_point);
static void RegExpCodeCreateEvent(Code* code, String* source);
static void ProcessMovedFunctions();
static void SetterCallbackEvent(String* name, Address entry_point);
static void SFIMoveEvent(Address from, Address to);
static INLINE(bool is_profiling()) {
return NoBarrier_Load(&is_profiling_);


@ -106,6 +106,11 @@ static Handle<Object> Invoke(bool construct,
ASSERT(*has_pending_exception == Top::has_pending_exception());
if (*has_pending_exception) {
Top::ReportPendingMessages();
if (Top::pending_exception() == Failure::OutOfMemoryException()) {
if (!HandleScopeImplementer::instance()->ignore_out_of_memory()) {
V8::FatalProcessOutOfMemory("JS", true);
}
}
return Handle<Object>();
} else {
Top::clear_pending_message();


@ -120,6 +120,7 @@ DEFINE_bool(time_hydrogen, false, "timing for hydrogen")
DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
DEFINE_bool(trace_inlining, false, "trace inlining decisions")
DEFINE_bool(trace_alloc, false, "trace register allocator")
DEFINE_bool(trace_all_uses, false, "trace all use positions")
DEFINE_bool(trace_range, false, "trace range analysis")
DEFINE_bool(trace_gvn, false, "trace global value numbering")
DEFINE_bool(trace_representation, false, "trace representation types")
@ -134,7 +135,11 @@ DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
DEFINE_bool(aggressive_loop_invariant_motion, true,
"aggressive motion of instructions out of loops")
#ifdef V8_TARGET_ARCH_X64
DEFINE_bool(use_osr, false, "use on-stack replacement")
#else
DEFINE_bool(use_osr, true, "use on-stack replacement")
#endif
DEFINE_bool(trace_osr, false, "trace on-stack replacement")
DEFINE_int(stress_runs, 0, "number of stress runs")
DEFINE_bool(optimize_closures, true, "optimize closures")


@ -1411,9 +1411,8 @@ static void AddUnwindInfo(CodeDescription *desc) {
#ifdef V8_TARGET_ARCH_X64
if (desc->tag() == GDBJITInterface::FUNCTION) {
// To avoid propagating unwinding information through the
// compilation pipeline we rely on function prologue
// and epilogue being the same for all code objects generated
// by the full code generator.
// compilation pipeline we use an approximation.
// For most use cases this should not affect usability.
static const int kFramePointerPushOffset = 1;
static const int kFramePointerSetOffset = 4;
static const int kFramePointerPopOffset = -3;
@ -1427,19 +1426,6 @@ static void AddUnwindInfo(CodeDescription *desc) {
uintptr_t frame_pointer_pop_address =
desc->CodeEnd() + kFramePointerPopOffset;
#ifdef DEBUG
static const uint8_t kFramePointerPushInstruction = 0x48; // push ebp
static const uint16_t kFramePointerSetInstruction = 0x5756; // mov ebp, esp
static const uint8_t kFramePointerPopInstruction = 0xBE; // pop ebp
ASSERT(*reinterpret_cast<uint8_t*>(frame_pointer_push_address) ==
kFramePointerPushInstruction);
ASSERT(*reinterpret_cast<uint16_t*>(frame_pointer_set_address) ==
kFramePointerSetInstruction);
ASSERT(*reinterpret_cast<uint8_t*>(frame_pointer_pop_address) ==
kFramePointerPopInstruction);
#endif
desc->SetStackStateStartAddress(CodeDescription::POST_RBP_PUSH,
frame_pointer_push_address);
desc->SetStackStateStartAddress(CodeDescription::POST_RBP_SET,


@ -834,49 +834,39 @@ bool CompileLazyShared(Handle<SharedFunctionInfo> shared,
}
bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
static bool CompileLazyFunction(Handle<JSFunction> function,
ClearExceptionFlag flag,
InLoopFlag in_loop_flag) {
bool result = true;
if (function->shared()->is_compiled()) {
function->ReplaceCode(function->shared()->code());
function->shared()->set_code_age(0);
} else {
CompilationInfo info(function);
if (in_loop_flag == IN_LOOP) info.MarkAsInLoop();
result = CompileLazyHelper(&info, flag);
ASSERT(!result || function->is_compiled());
}
if (result && function->is_compiled()) {
PROFILE(FunctionCreateEvent(*function));
}
return result;
}
bool CompileLazy(Handle<JSFunction> function,
ClearExceptionFlag flag) {
return CompileLazyFunction(function, flag, NOT_IN_LOOP);
}
bool CompileLazyInLoop(Handle<JSFunction> function,
ClearExceptionFlag flag) {
bool result = true;
if (function->shared()->is_compiled()) {
function->ReplaceCode(function->shared()->code());
function->shared()->set_code_age(0);
} else {
CompilationInfo info(function);
info.MarkAsInLoop();
result = CompileLazyHelper(&info, flag);
ASSERT(!result || function->is_compiled());
}
if (result && function->is_compiled()) {
PROFILE(FunctionCreateEvent(*function));
}
return result;
return CompileLazyFunction(function, flag, IN_LOOP);
}
bool CompileOptimized(Handle<JSFunction> function, int osr_ast_id) {
CompilationInfo info(function);
info.SetOptimizing(osr_ast_id);
bool result = CompileLazyHelper(&info, KEEP_EXCEPTION);
if (result) PROFILE(FunctionCreateEvent(*function));
return result;
return CompileLazyHelper(&info, KEEP_EXCEPTION);
}

204
deps/v8/src/heap.cc vendored

@ -134,7 +134,7 @@ Heap::HeapState Heap::gc_state_ = NOT_IN_GC;
int Heap::mc_count_ = 0;
int Heap::ms_count_ = 0;
int Heap::gc_count_ = 0;
unsigned int Heap::gc_count_ = 0;
GCTracer* Heap::tracer_ = NULL;
@ -515,7 +515,6 @@ bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_gc) HeapProfiler::WriteSample();
if (CpuProfiler::is_profiling()) CpuProfiler::ProcessMovedFunctions();
#endif
return next_gc_likely_to_collect_more;
@ -1350,9 +1349,8 @@ class ScavengingVisitor : public StaticVisitorBase {
HEAP_PROFILE(ObjectMoveEvent(source->address(), target->address()));
#if defined(ENABLE_LOGGING_AND_PROFILING)
if (Logger::is_logging() || CpuProfiler::is_profiling()) {
if (target->IsJSFunction()) {
PROFILE(FunctionMoveEvent(source->address(), target->address()));
PROFILE(FunctionCreateEventFromMove(JSFunction::cast(target)));
if (target->IsSharedFunctionInfo()) {
PROFILE(SFIMoveEvent(source->address(), target->address()));
}
}
#endif
@ -2924,9 +2922,8 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// constructor to the function.
Object* result;
{ MaybeObject* maybe_result =
JSObject::cast(prototype)->SetProperty(constructor_symbol(),
function,
DONT_ENUM);
JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
constructor_symbol(), function, DONT_ENUM);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
return prototype;
@ -3797,9 +3794,9 @@ bool Heap::IdleNotification() {
static const int kIdlesBeforeMarkSweep = 7;
static const int kIdlesBeforeMarkCompact = 8;
static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
static const int kGCsBetweenCleanup = 4;
static const unsigned int kGCsBetweenCleanup = 4;
static int number_idle_notifications = 0;
static int last_gc_count = gc_count_;
static unsigned int last_gc_count = gc_count_;
bool uncommit = true;
bool finished = false;
@ -3808,7 +3805,7 @@ bool Heap::IdleNotification() {
// GCs have taken place. This allows another round of cleanup based
// on idle notifications if enough work has been carried out to
// provoke a number of garbage collections.
if (gc_count_ < last_gc_count + kGCsBetweenCleanup) {
if (gc_count_ - last_gc_count < kGCsBetweenCleanup) {
number_idle_notifications =
Min(number_idle_notifications + 1, kMaxIdleCount);
} else {
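// A standalone sketch (plain C++, not part of the patch) of why the test
// above was rewritten: with unsigned counters, the addition in the old form
// can wrap, while the subtraction always yields the true distance mod 2^32.
#include <cassert>

int main() {
  const unsigned int kGCsBetweenCleanup = 4;
  unsigned int last_gc_count = 4294967293u;   // UINT_MAX - 2
  unsigned int gc_count = last_gc_count + 1;  // one GC later, not yet wrapped
  // New form: a distance of 1 is correctly below kGCsBetweenCleanup.
  assert(gc_count - last_gc_count < kGCsBetweenCleanup);
  // Old form: last_gc_count + 4 wraps around to 1, so the test misfires.
  assert(!(gc_count < last_gc_count + kGCsBetweenCleanup));
  return 0;
}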
@ -5182,32 +5179,77 @@ void HeapIterator::reset() {
}
#ifdef DEBUG
#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
static bool search_for_any_global;
static Object* search_target;
static bool found_target;
static List<Object*> object_stack(20);
Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL);
// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;
static void MarkObjectRecursively(Object** p);
class MarkObjectVisitor : public ObjectVisitor {
class PathTracer::MarkVisitor: public ObjectVisitor {
public:
explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
void VisitPointers(Object** start, Object** end) {
// Copy all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
// Scan all HeapObject pointers in [start, end)
for (Object** p = start; !tracer_->found() && (p < end); p++) {
if ((*p)->IsHeapObject())
MarkObjectRecursively(p);
tracer_->MarkRecursively(p, this);
}
}
private:
PathTracer* tracer_;
};
static MarkObjectVisitor mark_visitor;
static void MarkObjectRecursively(Object** p) {
class PathTracer::UnmarkVisitor: public ObjectVisitor {
public:
explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
void VisitPointers(Object** start, Object** end) {
// Scan all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject())
tracer_->UnmarkRecursively(p, this);
}
}
private:
PathTracer* tracer_;
};
void PathTracer::VisitPointers(Object** start, Object** end) {
bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
// Visit all HeapObject pointers in [start, end)
for (Object** p = start; !done && (p < end); p++) {
if ((*p)->IsHeapObject()) {
TracePathFrom(p);
done = ((what_to_find_ == FIND_FIRST) && found_target_);
}
}
}
void PathTracer::Reset() {
found_target_ = false;
object_stack_.Clear();
}
void PathTracer::TracePathFrom(Object** root) {
ASSERT((search_target_ == kAnyGlobalObject) ||
search_target_->IsHeapObject());
found_target_in_trace_ = false;
object_stack_.Clear();
MarkVisitor mark_visitor(this);
MarkRecursively(root, &mark_visitor);
UnmarkVisitor unmark_visitor(this);
UnmarkRecursively(root, &unmark_visitor);
ProcessResults();
}
void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
@ -5216,14 +5258,17 @@ static void MarkObjectRecursively(Object** p) {
if (!map->IsHeapObject()) return; // visited before
if (found_target) return; // stop if target found
object_stack.Add(obj);
if ((search_for_any_global && obj->IsJSGlobalObject()) ||
(!search_for_any_global && (obj == search_target))) {
found_target = true;
if (found_target_in_trace_) return; // stop if target found
object_stack_.Add(obj);
if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
(obj == search_target_)) {
found_target_in_trace_ = true;
found_target_ = true;
return;
}
bool is_global_context = obj->IsGlobalContext();
// not visited yet
Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
@ -5231,31 +5276,30 @@ static void MarkObjectRecursively(Object** p) {
obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
MarkObjectRecursively(&map);
// Scan the object body.
if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
// This is specialized to scan Context's properly.
Object** start = reinterpret_cast<Object**>(obj->address() +
Context::kHeaderSize);
Object** end = reinterpret_cast<Object**>(obj->address() +
Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
mark_visitor->VisitPointers(start, end);
} else {
obj->IterateBody(map_p->instance_type(),
obj->SizeFromMap(map_p),
mark_visitor);
}
obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
&mark_visitor);
// Scan the map after the body because the body is a lot more interesting
// when doing leak detection.
MarkRecursively(&map, mark_visitor);
if (!found_target) // don't pop if found the target
object_stack.RemoveLast();
if (!found_target_in_trace_) // don't pop if found the target
object_stack_.RemoveLast();
}
static void UnmarkObjectRecursively(Object** p);
class UnmarkObjectVisitor : public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
// Copy all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject())
UnmarkObjectRecursively(p);
}
}
};
static UnmarkObjectVisitor unmark_visitor;
static void UnmarkObjectRecursively(Object** p) {
void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
if (!(*p)->IsHeapObject()) return;
HeapObject* obj = HeapObject::cast(*p);
@@ -5274,63 +5318,38 @@ static void UnmarkObjectRecursively(Object** p) {
obj->set_map(reinterpret_cast<Map*>(map_p));
UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
obj->IterateBody(Map::cast(map_p)->instance_type(),
obj->SizeFromMap(Map::cast(map_p)),
&unmark_visitor);
unmark_visitor);
}
static void MarkRootObjectRecursively(Object** root) {
if (search_for_any_global) {
ASSERT(search_target == NULL);
} else {
ASSERT(search_target->IsHeapObject());
}
found_target = false;
object_stack.Clear();
MarkObjectRecursively(root);
UnmarkObjectRecursively(root);
if (found_target) {
void PathTracer::ProcessResults() {
if (found_target_) {
PrintF("=====================================\n");
PrintF("==== Path to object ====\n");
PrintF("=====================================\n\n");
ASSERT(!object_stack.is_empty());
for (int i = 0; i < object_stack.length(); i++) {
ASSERT(!object_stack_.is_empty());
for (int i = 0; i < object_stack_.length(); i++) {
if (i > 0) PrintF("\n |\n |\n V\n\n");
Object* obj = object_stack[i];
Object* obj = object_stack_[i];
obj->Print();
}
PrintF("=====================================\n");
}
}
#endif // DEBUG || LIVE_OBJECT_LIST
// Helper class for visiting HeapObjects recursively.
class MarkRootVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
// Visit all HeapObject pointers in [start, end)
for (Object** p = start; p < end; p++) {
if ((*p)->IsHeapObject())
MarkRootObjectRecursively(p);
}
}
};
#ifdef DEBUG
// Triggers a depth-first traversal of reachable objects from roots
// and finds a path to a specific heap object and prints it.
void Heap::TracePathToObject(Object* target) {
search_target = target;
search_for_any_global = false;
MarkRootVisitor root_visitor;
IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
@@ -5338,11 +5357,10 @@ void Heap::TracePathToObject(Object* target) {
// and finds a path to any global object and prints it. Useful for
// determining the source for leaks of global objects.
void Heap::TracePathToGlobal() {
search_target = NULL;
search_for_any_global = true;
MarkRootVisitor root_visitor;
IterateRoots(&root_visitor, VISIT_ONLY_STRONG);
PathTracer tracer(PathTracer::kAnyGlobalObject,
PathTracer::FIND_ALL,
VISIT_ALL);
IterateRoots(&tracer, VISIT_ONLY_STRONG);
}
#endif
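For reference, a minimal usage sketch of these two entry points (debug builds only; the function names and the "suspect" pointer are hypothetical):

#ifdef DEBUG
// Print every path from the strong roots to an object suspected of
// being kept alive unintentionally.
void DebugWhyIsThisAlive(Object* suspect) {
  Heap::TracePathToObject(suspect);
}

// Print paths from the strong roots to any global object, to help
// find the source of leaked global objects.
void DebugFindLeakedGlobals() {
  Heap::TracePathToGlobal();
}
#endif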

deps/v8/src/heap.h

@@ -30,6 +30,8 @@
#include <math.h>
#include "globals.h"
#include "list.h"
#include "spaces.h"
#include "splay-tree-inl.h"
#include "v8-counters.h"
@@ -1180,7 +1182,7 @@ class Heap : public AllStatic {
static int mc_count_; // how many mark-compact collections happened
static int ms_count_; // how many mark-sweep collections happened
static int gc_count_; // how many gc happened
static unsigned int gc_count_; // how many gc happened
// Total length of the strings we failed to flatten since the last GC.
static int unflattened_strings_length_;
@@ -1907,7 +1909,7 @@ class GCTracer BASE_EMBEDDED {
void set_collector(GarbageCollector collector) { collector_ = collector; }
// Sets the GC count.
void set_gc_count(int count) { gc_count_ = count; }
void set_gc_count(unsigned int count) { gc_count_ = count; }
// Sets the full GC count.
void set_full_gc_count(int count) { full_gc_count_ = count; }
@@ -1950,7 +1952,7 @@ class GCTracer BASE_EMBEDDED {
// A count (including this one, e.g., the first collection is 1) of the
// number of garbage collections.
int gc_count_;
unsigned int gc_count_;
// A count (including this one) of the number of full garbage collections.
int full_gc_count_;
@@ -2152,6 +2154,65 @@ class WeakObjectRetainer {
};
#if defined(DEBUG) || defined(LIVE_OBJECT_LIST)
// Helper class for tracing paths to a search target Object from all roots.
// The TracePathFrom() method can be used to trace paths from a specific
// object to the search target object.
class PathTracer : public ObjectVisitor {
public:
enum WhatToFind {
FIND_ALL, // Will find all matches.
FIND_FIRST // Will stop the search after first match.
};
// For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
// after the first match. If FIND_ALL is specified, then tracing will be
// done for all matches.
PathTracer(Object* search_target,
WhatToFind what_to_find,
VisitMode visit_mode)
: search_target_(search_target),
found_target_(false),
found_target_in_trace_(false),
what_to_find_(what_to_find),
visit_mode_(visit_mode),
object_stack_(20),
no_alloc() {}
virtual void VisitPointers(Object** start, Object** end);
void Reset();
void TracePathFrom(Object** root);
bool found() const { return found_target_; }
static Object* const kAnyGlobalObject;
protected:
class MarkVisitor;
class UnmarkVisitor;
void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
virtual void ProcessResults();
// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
static const int kMarkTag = 2;
Object* search_target_;
bool found_target_;
bool found_target_in_trace_;
WhatToFind what_to_find_;
VisitMode visit_mode_;
List<Object*> object_stack_;
AssertNoAllocation no_alloc; // i.e. no gc allowed.
DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
};
#endif // DEBUG || LIVE_OBJECT_LIST
} } // namespace v8::internal
#endif // V8_HEAP_H_
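The mark/unmark passes behind TracePathFrom() work by tagging each visited object's map word in place and restoring it afterwards. A rough sketch of the scheme (illustrative only; the real code stores the tagged word back via set_map() and treats a map word that no longer looks like a heap object as "visited before"):

#include <stdint.h>

// Heap pointers are word-aligned, so the low two bits of the map word
// are free. Tags 0, 1 and 3 are taken by the VM's pointer tagging;
// kMarkTag (2) is used to flag a HeapObject as visited during a trace.
static const uintptr_t kTagMask = 3;
static const uintptr_t kMarkTag = 2;

static inline uintptr_t MarkVisited(uintptr_t map_word) {
  return map_word + kMarkTag;  // no longer a valid heap-object pointer
}

static inline uintptr_t ClearVisited(uintptr_t map_word) {
  return map_word - kMarkTag;  // restores the original map pointer
}

static inline bool IsVisited(uintptr_t map_word) {
  // Assumes V8's kHeapObjectTag of 1: a marked word no longer carries it.
  return (map_word & kTagMask) != 1;
}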


@@ -57,10 +57,13 @@ const char* Representation::Mnemonic() const {
case kTagged: return "t";
case kDouble: return "d";
case kInteger32: return "i";
default:
case kExternal: return "x";
case kNumRepresentations:
UNREACHABLE();
return NULL;
}
UNREACHABLE();
return NULL;
}
@@ -221,7 +224,7 @@ HType HType::TypeFromValue(Handle<Object> value) {
}
int HValue::LookupOperandIndex(int occurrence_index, HValue* op) const {
int HValue::LookupOperandIndex(int occurrence_index, HValue* op) {
for (int i = 0; i < OperandCount(); ++i) {
if (OperandAt(i) == op) {
if (occurrence_index == 0) return i;
@@ -237,7 +240,7 @@ bool HValue::IsDefinedAfter(HBasicBlock* other) const {
}
bool HValue::UsesMultipleTimes(HValue* op) const {
bool HValue::UsesMultipleTimes(HValue* op) {
bool seen = false;
for (int i = 0; i < OperandCount(); ++i) {
if (OperandAt(i) == op) {
@@ -249,7 +252,7 @@ bool HValue::UsesMultipleTimes(HValue* op) const {
}
bool HValue::Equals(HValue* other) const {
bool HValue::Equals(HValue* other) {
if (other->opcode() != opcode()) return false;
if (!other->representation().Equals(representation())) return false;
if (!other->type_.Equals(type_)) return false;
@@ -264,7 +267,7 @@ bool HValue::Equals(HValue* other) const {
}
intptr_t HValue::Hashcode() const {
intptr_t HValue::Hashcode() {
intptr_t result = opcode();
int count = OperandCount();
for (int i = 0; i < count; ++i) {
@@ -281,33 +284,6 @@ void HValue::SetOperandAt(int index, HValue* value) {
}
void HLoadKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
if (index < 2) {
operands_[index] = value;
} else {
context_ = value;
}
}
void HStoreKeyedGeneric::InternalSetOperandAt(int index, HValue* value) {
if (index < 3) {
operands_[index] = value;
} else {
context_ = value;
}
}
void HStoreNamedGeneric::InternalSetOperandAt(int index, HValue* value) {
if (index < 2) {
operands_[index] = value;
} else {
context_ = value;
}
}
void HValue::ReplaceAndDelete(HValue* other) {
ReplaceValue(other);
Delete();
@@ -438,7 +414,7 @@ void HValue::ComputeInitialRange() {
}
void HInstruction::PrintTo(StringStream* stream) const {
void HInstruction::PrintTo(StringStream* stream) {
stream->Add("%s", Mnemonic());
if (HasSideEffects()) stream->Add("*");
stream->Add(" ");
@@ -561,69 +537,64 @@ void HInstruction::Verify() {
#endif
void HCall::PrintDataTo(StringStream* stream) const {
void HUnaryCall::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" ");
stream->Add("#%d", argument_count());
}
void HUnaryCall::PrintDataTo(StringStream* stream) const {
value()->PrintNameTo(stream);
stream->Add(" ");
HCall::PrintDataTo(stream);
}
void HBinaryCall::PrintDataTo(StringStream* stream) const {
void HBinaryCall::PrintDataTo(StringStream* stream) {
first()->PrintNameTo(stream);
stream->Add(" ");
second()->PrintNameTo(stream);
stream->Add(" ");
HCall::PrintDataTo(stream);
stream->Add("#%d", argument_count());
}
void HCallConstantFunction::PrintDataTo(StringStream* stream) const {
void HCallConstantFunction::PrintDataTo(StringStream* stream) {
if (IsApplyFunction()) {
stream->Add("optimized apply ");
} else {
stream->Add("%o ", function()->shared()->DebugName());
}
HCall::PrintDataTo(stream);
stream->Add("#%d", argument_count());
}
void HCallNamed::PrintDataTo(StringStream* stream) const {
void HCallNamed::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
HUnaryCall::PrintDataTo(stream);
}
void HCallGlobal::PrintDataTo(StringStream* stream) const {
void HCallGlobal::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
HUnaryCall::PrintDataTo(stream);
}
void HCallKnownGlobal::PrintDataTo(StringStream* stream) const {
void HCallKnownGlobal::PrintDataTo(StringStream* stream) {
stream->Add("%o ", target()->shared()->DebugName());
HCall::PrintDataTo(stream);
stream->Add("#%d", argument_count());
}
void HCallRuntime::PrintDataTo(StringStream* stream) const {
void HCallRuntime::PrintDataTo(StringStream* stream) {
stream->Add("%o ", *name());
HCall::PrintDataTo(stream);
stream->Add("#%d", argument_count());
}
void HClassOfTest::PrintDataTo(StringStream* stream) const {
void HClassOfTest::PrintDataTo(StringStream* stream) {
stream->Add("class_of_test(");
value()->PrintNameTo(stream);
stream->Add(", \"%o\")", *class_name());
}
void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintNameTo(stream);
stream->Add("[");
index()->PrintNameTo(stream);
@@ -632,7 +603,7 @@ void HAccessArgumentsAt::PrintDataTo(StringStream* stream) const {
}
void HControlInstruction::PrintDataTo(StringStream* stream) const {
void HControlInstruction::PrintDataTo(StringStream* stream) {
if (FirstSuccessor() != NULL) {
int first_id = FirstSuccessor()->block_id();
if (SecondSuccessor() == NULL) {
@@ -645,13 +616,13 @@ void HControlInstruction::PrintDataTo(StringStream* stream) const {
}
void HUnaryControlInstruction::PrintDataTo(StringStream* stream) const {
void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
HControlInstruction::PrintDataTo(stream);
}
void HCompareMap::PrintDataTo(StringStream* stream) const {
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map());
HControlInstruction::PrintDataTo(stream);
@@ -679,19 +650,19 @@ const char* HUnaryMathOperation::OpName() const {
}
void HUnaryMathOperation::PrintDataTo(StringStream* stream) const {
void HUnaryMathOperation::PrintDataTo(StringStream* stream) {
const char* name = OpName();
stream->Add("%s ", name);
value()->PrintNameTo(stream);
}
void HUnaryOperation::PrintDataTo(StringStream* stream) const {
void HUnaryOperation::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
}
void HHasInstanceType::PrintDataTo(StringStream* stream) const {
void HHasInstanceType::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
switch (from_) {
case FIRST_JS_OBJECT_TYPE:
@@ -712,14 +683,14 @@ void HHasInstanceType::PrintDataTo(StringStream* stream) const {
}
void HTypeofIs::PrintDataTo(StringStream* stream) const {
void HTypeofIs::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" == ");
stream->Add(type_literal_->ToAsciiVector());
}
void HChange::PrintDataTo(StringStream* stream) const {
void HChange::PrintDataTo(StringStream* stream) {
HUnaryOperation::PrintDataTo(stream);
stream->Add(" %s to %s", from_.Mnemonic(), to_.Mnemonic());
@@ -735,26 +706,26 @@ HCheckInstanceType* HCheckInstanceType::NewIsJSObjectOrJSFunction(
}
void HCheckMap::PrintDataTo(StringStream* stream) const {
void HCheckMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" %p", *map());
}
void HCheckFunction::PrintDataTo(StringStream* stream) const {
void HCheckFunction::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" %p", *target());
}
void HCallStub::PrintDataTo(StringStream* stream) const {
void HCallStub::PrintDataTo(StringStream* stream) {
stream->Add("%s ",
CodeStub::MajorName(major_key_, false));
HUnaryCall::PrintDataTo(stream);
}
void HInstanceOf::PrintDataTo(StringStream* stream) const {
void HInstanceOf::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
right()->PrintNameTo(stream);
@@ -899,7 +870,7 @@ Range* HMod::InferRange() {
}
void HPhi::PrintTo(StringStream* stream) const {
void HPhi::PrintTo(StringStream* stream) {
stream->Add("[");
for (int i = 0; i < OperandCount(); ++i) {
HValue* value = OperandAt(i);
@@ -925,7 +896,7 @@ void HPhi::AddInput(HValue* value) {
}
HValue* HPhi::GetRedundantReplacement() const {
HValue* HPhi::GetRedundantReplacement() {
HValue* candidate = NULL;
int count = OperandCount();
int position = 0;
@@ -977,7 +948,7 @@ void HPhi::AddIndirectUsesTo(int* dest) {
}
void HSimulate::PrintDataTo(StringStream* stream) const {
void HSimulate::PrintDataTo(StringStream* stream) {
stream->Add("id=%d ", ast_id());
if (pop_count_ > 0) stream->Add("pop %d", pop_count_);
if (values_.length() > 0) {
@@ -994,7 +965,7 @@ void HSimulate::PrintDataTo(StringStream* stream) const {
}
void HEnterInlined::PrintDataTo(StringStream* stream) const {
void HEnterInlined::PrintDataTo(StringStream* stream) {
SmartPointer<char> name = function()->debug_name()->ToCString();
stream->Add("%s, id=%d", *name, function()->id());
}
@@ -1035,7 +1006,7 @@ HConstant* HConstant::CopyToTruncatedInt32() const {
}
void HConstant::PrintDataTo(StringStream* stream) const {
void HConstant::PrintDataTo(StringStream* stream) {
handle()->ShortPrint(stream);
}
@@ -1045,7 +1016,7 @@ bool HArrayLiteral::IsCopyOnWrite() const {
}
void HBinaryOperation::PrintDataTo(StringStream* stream) const {
void HBinaryOperation::PrintDataTo(StringStream* stream) {
left()->PrintNameTo(stream);
stream->Add(" ");
right()->PrintNameTo(stream);
@@ -1129,7 +1100,7 @@ Range* HShl::InferRange() {
void HCompare::PrintDataTo(StringStream* stream) const {
void HCompare::PrintDataTo(StringStream* stream) {
stream->Add(Token::Name(token()));
stream->Add(" ");
HBinaryOperation::PrintDataTo(stream);
@@ -1148,18 +1119,18 @@ void HCompare::SetInputRepresentation(Representation r) {
}
void HParameter::PrintDataTo(StringStream* stream) const {
void HParameter::PrintDataTo(StringStream* stream) {
stream->Add("%u", index());
}
void HLoadNamedField::PrintDataTo(StringStream* stream) const {
void HLoadNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(" @%d%s", offset(), is_in_object() ? "[in-object]" : "");
}
void HLoadKeyed::PrintDataTo(StringStream* stream) const {
void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
@@ -1167,7 +1138,15 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) const {
}
void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const {
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("]");
}
void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) {
external_pointer()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
@@ -1175,7 +1154,7 @@ void HLoadPixelArrayElement::PrintDataTo(StringStream* stream) const {
}
void HStoreNamed::PrintDataTo(StringStream* stream) const {
void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
ASSERT(name()->IsString());
@@ -1185,15 +1164,20 @@ void HStoreNamed::PrintDataTo(StringStream* stream) const {
}
void HStoreNamedField::PrintDataTo(StringStream* stream) const {
HStoreNamed::PrintDataTo(stream);
void HStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
ASSERT(name()->IsString());
stream->Add(*String::cast(*name())->ToCString());
stream->Add(" = ");
value()->PrintNameTo(stream);
if (!transition().is_null()) {
stream->Add(" (transition map %p)", *transition());
}
}
void HStoreKeyed::PrintDataTo(StringStream* stream) const {
void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
@@ -1202,25 +1186,43 @@ void HStoreKeyed::PrintDataTo(StringStream* stream) const {
}
void HLoadGlobal::PrintDataTo(StringStream* stream) const {
void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("] = ");
value()->PrintNameTo(stream);
}
void HStorePixelArrayElement::PrintDataTo(StringStream* stream) {
external_pointer()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
stream->Add("] = ");
value()->PrintNameTo(stream);
}
void HLoadGlobal::PrintDataTo(StringStream* stream) {
stream->Add("[%p]", *cell());
if (check_hole_value()) stream->Add(" (deletable/read-only)");
}
void HStoreGlobal::PrintDataTo(StringStream* stream) const {
void HStoreGlobal::PrintDataTo(StringStream* stream) {
stream->Add("[%p] = ", *cell());
value()->PrintNameTo(stream);
}
void HLoadContextSlot::PrintDataTo(StringStream* stream) const {
void HLoadContextSlot::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add("[%d]", slot_index());
}
void HStoreContextSlot::PrintDataTo(StringStream* stream) const {
void HStoreContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintNameTo(stream);
stream->Add("[%d] = ", slot_index());
value()->PrintNameTo(stream);
@@ -1230,33 +1232,33 @@ void HStoreContextSlot::PrintDataTo(StringStream* stream) const {
// Implementation of type inference and type conversions. Calculates
// the inferred type of this instruction based on the input operands.
HType HValue::CalculateInferredType() const {
HType HValue::CalculateInferredType() {
return type_;
}
HType HCheckMap::CalculateInferredType() const {
HType HCheckMap::CalculateInferredType() {
return value()->type();
}
HType HCheckFunction::CalculateInferredType() const {
HType HCheckFunction::CalculateInferredType() {
return value()->type();
}
HType HCheckNonSmi::CalculateInferredType() const {
HType HCheckNonSmi::CalculateInferredType() {
// TODO(kasperl): Is there any way to signal that this isn't a smi?
return HType::Tagged();
}
HType HCheckSmi::CalculateInferredType() const {
HType HCheckSmi::CalculateInferredType() {
return HType::Smi();
}
HType HPhi::CalculateInferredType() const {
HType HPhi::CalculateInferredType() {
HType result = HType::Uninitialized();
for (int i = 0; i < OperandCount(); ++i) {
HType current = OperandAt(i)->type();
@@ -1266,77 +1268,77 @@ HType HPhi::CalculateInferredType() const {
}
HType HConstant::CalculateInferredType() const {
HType HConstant::CalculateInferredType() {
return constant_type_;
}
HType HCompare::CalculateInferredType() const {
HType HCompare::CalculateInferredType() {
return HType::Boolean();
}
HType HCompareJSObjectEq::CalculateInferredType() const {
HType HCompareJSObjectEq::CalculateInferredType() {
return HType::Boolean();
}
HType HUnaryPredicate::CalculateInferredType() const {
HType HUnaryPredicate::CalculateInferredType() {
return HType::Boolean();
}
HType HBitwiseBinaryOperation::CalculateInferredType() const {
HType HBitwiseBinaryOperation::CalculateInferredType() {
return HType::TaggedNumber();
}
HType HArithmeticBinaryOperation::CalculateInferredType() const {
HType HArithmeticBinaryOperation::CalculateInferredType() {
return HType::TaggedNumber();
}
HType HAdd::CalculateInferredType() const {
HType HAdd::CalculateInferredType() {
return HType::Tagged();
}
HType HBitAnd::CalculateInferredType() const {
HType HBitAnd::CalculateInferredType() {
return HType::TaggedNumber();
}
HType HBitXor::CalculateInferredType() const {
HType HBitXor::CalculateInferredType() {
return HType::TaggedNumber();
}
HType HBitOr::CalculateInferredType() const {
HType HBitOr::CalculateInferredType() {
return HType::TaggedNumber();
}
HType HBitNot::CalculateInferredType() const {
HType HBitNot::CalculateInferredType() {
return HType::TaggedNumber();
}
HType HUnaryMathOperation::CalculateInferredType() const {
HType HUnaryMathOperation::CalculateInferredType() {
return HType::TaggedNumber();
}
HType HShl::CalculateInferredType() const {
HType HShl::CalculateInferredType() {
return HType::TaggedNumber();
}
HType HShr::CalculateInferredType() const {
HType HShr::CalculateInferredType() {
return HType::TaggedNumber();
}
HType HSar::CalculateInferredType() const {
HType HSar::CalculateInferredType() {
return HType::TaggedNumber();
}

File diff suppressed because it is too large

File diff suppressed because it is too large

deps/v8/src/hydrogen.h

@@ -196,94 +196,54 @@ class HSubgraph: public ZoneObject {
explicit HSubgraph(HGraph* graph)
: graph_(graph),
entry_block_(NULL),
exit_block_(NULL),
break_continue_info_(4) {
exit_block_(NULL) {
}
HGraph* graph() const { return graph_; }
HEnvironment* environment() const {
ASSERT(HasExit());
return exit_block_->last_environment();
}
bool HasExit() const { return exit_block_ != NULL; }
void PreProcessOsrEntry(IterationStatement* statement);
void AppendOptional(HSubgraph* graph,
bool on_true_branch,
HValue* boolean_value);
void AppendJoin(HSubgraph* then_graph, HSubgraph* else_graph, AstNode* node);
void AppendWhile(HSubgraph* condition,
HSubgraph* body,
IterationStatement* statement,
HSubgraph* continue_subgraph,
HSubgraph* exit);
void AppendDoWhile(HSubgraph* body,
IterationStatement* statement,
HSubgraph* go_back,
HSubgraph* exit);
void AppendEndless(HSubgraph* body, IterationStatement* statement);
void Append(HSubgraph* next, BreakableStatement* statement);
void ResolveContinue(IterationStatement* statement);
HBasicBlock* BundleBreak(BreakableStatement* statement);
HBasicBlock* BundleContinue(IterationStatement* statement);
HBasicBlock* BundleBreakContinue(BreakableStatement* statement,
bool is_continue,
int join_id);
HBasicBlock* JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id);
void FinishExit(HControlInstruction* instruction);
void FinishBreakContinue(BreakableStatement* target, bool is_continue);
void Initialize(HBasicBlock* block) {
ASSERT(entry_block_ == NULL);
entry_block_ = block;
exit_block_ = block;
}
HBasicBlock* entry_block() const { return entry_block_; }
HBasicBlock* exit_block() const { return exit_block_; }
void set_exit_block(HBasicBlock* block) {
exit_block_ = block;
}
void ConnectExitTo(HBasicBlock* other, bool include_stack_check = false) {
if (HasExit()) {
exit_block()->Goto(other, include_stack_check);
}
}
void PreProcessOsrEntry(IterationStatement* statement);
void AddBreakContinueInfo(HSubgraph* other) {
break_continue_info_.AddAll(other->break_continue_info_);
void AppendJoin(HBasicBlock* first, HBasicBlock* second, int join_id);
void AppendWhile(IterationStatement* statement,
HBasicBlock* condition_entry,
HBasicBlock* exit_block,
HBasicBlock* body_exit,
HBasicBlock* break_block,
HBasicBlock* loop_entry,
HBasicBlock* loop_exit);
void AppendDoWhile(IterationStatement* statement,
HBasicBlock* body_entry,
HBasicBlock* go_back,
HBasicBlock* exit_block,
HBasicBlock* break_block);
void AppendEndless(IterationStatement* statement,
HBasicBlock* body_entry,
HBasicBlock* body_exit,
HBasicBlock* break_block);
void Append(BreakableStatement* stmt,
HBasicBlock* entry_block,
HBasicBlock* exit_block,
HBasicBlock* break_block);
void ResolveContinue(IterationStatement* statement,
HBasicBlock* continue_block);
HBasicBlock* JoinBlocks(HBasicBlock* a, HBasicBlock* b, int id);
void FinishExit(HControlInstruction* instruction);
void Initialize(HBasicBlock* block) {
ASSERT(entry_block_ == NULL);
entry_block_ = block;
exit_block_ = block;
}
protected:
class BreakContinueInfo: public ZoneObject {
public:
BreakContinueInfo(BreakableStatement* target, HBasicBlock* block,
bool is_continue)
: target_(target), block_(block), continue_(is_continue) {}
BreakableStatement* target() const { return target_; }
HBasicBlock* block() const { return block_; }
bool is_continue() const { return continue_; }
bool IsResolved() const { return block_ == NULL; }
void Resolve() { block_ = NULL; }
private:
BreakableStatement* target_;
HBasicBlock* block_;
bool continue_;
};
const ZoneList<BreakContinueInfo*>* break_continue_info() const {
return &break_continue_info_;
}
HGraph* graph_; // The graph this is a subgraph of.
HBasicBlock* entry_block_;
HBasicBlock* exit_block_;
private:
ZoneList<BreakContinueInfo*> break_continue_info_;
};
@@ -621,6 +581,53 @@ class TestContext: public AstContext {
class HGraphBuilder: public AstVisitor {
public:
enum BreakType { BREAK, CONTINUE };
// A class encapsulating (lazily-allocated) break and continue blocks for
// a breakable statement. Separated from BreakAndContinueScope so that it
// can have a separate lifetime.
class BreakAndContinueInfo BASE_EMBEDDED {
public:
explicit BreakAndContinueInfo(BreakableStatement* target)
: target_(target), break_block_(NULL), continue_block_(NULL) {
}
BreakableStatement* target() { return target_; }
HBasicBlock* break_block() { return break_block_; }
void set_break_block(HBasicBlock* block) { break_block_ = block; }
HBasicBlock* continue_block() { return continue_block_; }
void set_continue_block(HBasicBlock* block) { continue_block_ = block; }
private:
BreakableStatement* target_;
HBasicBlock* break_block_;
HBasicBlock* continue_block_;
};
// A helper class to maintain a stack of current BreakAndContinueInfo
// structures mirroring BreakableStatement nesting.
class BreakAndContinueScope BASE_EMBEDDED {
public:
BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner)
: info_(info), owner_(owner), next_(owner->break_scope()) {
owner->set_break_scope(this);
}
~BreakAndContinueScope() { owner_->set_break_scope(next_); }
BreakAndContinueInfo* info() { return info_; }
HGraphBuilder* owner() { return owner_; }
BreakAndContinueScope* next() { return next_; }
// Search the break stack for a break or continue target.
HBasicBlock* Get(BreakableStatement* stmt, BreakType type);
private:
BreakAndContinueInfo* info_;
HGraphBuilder* owner_;
BreakAndContinueScope* next_;
};
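A hypothetical fragment showing how the two helper classes above are meant to compose; the visitor name and body are illustrative, not the real builder code:

void HGraphBuilder::VisitSomeLoop(IterationStatement* stmt) {  // hypothetical
  BreakAndContinueInfo break_info(stmt);
  {
    BreakAndContinueScope push(&break_info, this);
    // Build the loop body here. A nested break or continue finds its
    // target via break_scope()->Get(stmt, BREAK) or Get(stmt, CONTINUE),
    // which lazily allocates the blocks held by break_info.
  }  // The scope destructor pops break_info off the builder's stack.
  HBasicBlock* break_block = break_info.break_block();  // NULL if unused
  // ... join break_block with the loop's normal exit block ...
}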
explicit HGraphBuilder(TypeFeedbackOracle* oracle)
: oracle_(oracle),
graph_(NULL),
@@ -629,16 +636,25 @@ class HGraphBuilder: public AstVisitor {
ast_context_(NULL),
call_context_(NULL),
function_return_(NULL),
inlined_count_(0) { }
inlined_count_(0),
break_scope_(NULL) {
}
HGraph* CreateGraph(CompilationInfo* info);
// Simple accessors.
HGraph* graph() const { return graph_; }
HSubgraph* subgraph() const { return current_subgraph_; }
BreakAndContinueScope* break_scope() const { return break_scope_; }
void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
HEnvironment* environment() const { return subgraph()->environment(); }
HBasicBlock* CurrentBlock() const { return subgraph()->exit_block(); }
HBasicBlock* current_block() const { return subgraph()->exit_block(); }
void set_current_block(HBasicBlock* block) {
subgraph()->set_exit_block(block);
}
HEnvironment* environment() const {
return current_block()->last_environment();
}
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
@@ -650,8 +666,7 @@ class HGraphBuilder: public AstVisitor {
private:
// Type of a member function that generates inline code for a native function.
typedef void (HGraphBuilder::*InlineFunctionGenerator)(int argument_count,
int ast_id);
typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call);
// Forward declarations for inner scope classes.
class SubgraphScope;
@@ -675,7 +690,7 @@ class HGraphBuilder: public AstVisitor {
// Generators for inline runtime functions.
#define INLINE_FUNCTION_GENERATOR_DECLARATION(Name, argc, ressize) \
void Generate##Name(int argument_count, int ast_id);
void Generate##Name(CallRuntime* call);
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_DECLARATION)
@@ -684,9 +699,10 @@ class HGraphBuilder: public AstVisitor {
void Bailout(const char* reason);
void AppendPeeledWhile(IterationStatement* stmt,
HSubgraph* cond_graph,
HSubgraph* body_graph,
HSubgraph* exit_graph);
HBasicBlock* condition_entry,
HBasicBlock* exit_block,
HBasicBlock* body_exit,
HBasicBlock* break_block);
void AddToSubgraph(HSubgraph* graph, ZoneList<Statement*>* stmts);
void AddToSubgraph(HSubgraph* graph, Statement* stmt);
@@ -702,17 +718,21 @@ class HGraphBuilder: public AstVisitor {
HBasicBlock* true_block,
HBasicBlock* false_block);
// Visit an argument subexpression.
// Visit an argument subexpression and emit a push to the outgoing
// arguments.
void VisitArgument(Expression* expr);
void VisitArgumentList(ZoneList<Expression*>* arguments);
// Visit a list of expressions from left to right, each in a value context.
void VisitExpressions(ZoneList<Expression*>* exprs);
void AddPhi(HPhi* phi);
void PushAndAdd(HInstruction* instr);
// Remove the arguments from the bailout environment and emit instructions
// to push them as outgoing parameters.
void PreProcessCall(HCall* call);
template <int V> HInstruction* PreProcessCall(HCall<V>* call);
void AssumeRepresentation(HValue* value, Representation r);
static Representation ToRepresentation(TypeInfo info);
@@ -724,8 +744,6 @@ class HGraphBuilder: public AstVisitor {
AST_NODE_LIST(DECLARE_VISIT)
#undef DECLARE_VISIT
bool ShouldPeel(HSubgraph* cond, HSubgraph* body);
HBasicBlock* CreateBasicBlock(HEnvironment* env);
HSubgraph* CreateEmptySubgraph();
HSubgraph* CreateGotoSubgraph(HEnvironment* env);
@@ -816,6 +834,11 @@ class HGraphBuilder: public AstVisitor {
HValue* val,
Expression* expr);
HInstruction* BuildStoreKeyedPixelArrayElement(HValue* object,
HValue* key,
HValue* val,
Expression* expr);
HCompare* BuildSwitchCompare(HSubgraph* subgraph,
HValue* switch_value,
CaseClause* clause);
@@ -853,6 +876,8 @@ class HGraphBuilder: public AstVisitor {
int inlined_count_;
BreakAndContinueScope* break_scope_;
friend class AstContext; // Pushes and pops the AST context stack.
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);


@@ -183,13 +183,6 @@ const XMMRegister xmm7 = { 7 };
typedef XMMRegister DoubleRegister;
// Index of register used in pusha/popa.
// Order of pushed registers: EAX, ECX, EDX, EBX, ESP, EBP, ESI, and EDI
inline int EspIndexForPushAll(Register reg) {
return Register::kNumRegisters - 1 - reg.code();
}
enum Condition {
// any value < 0 is considered no_condition
no_condition = -1,
@@ -980,6 +973,10 @@ class Assembler : public Malloced {
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
int relocation_writer_size() {
return (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
static const int kMinimalBufferSize = 4*KB;


@@ -2385,14 +2385,14 @@ void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
NearLabel call_runtime;
ASSERT(op_ == Token::ADD);
NearLabel left_not_string, call_runtime;
// Registers containing left and right operands respectively.
Register left = edx;
Register right = eax;
// Test if left operand is a string.
NearLabel left_not_string;
__ test(left, Immediate(kSmiTagMask));
__ j(zero, &left_not_string);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);


@@ -8234,8 +8234,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
// but "delete this" is.
ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
@@ -8244,7 +8244,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// Call the runtime to delete from the context holding the named
@@ -8255,13 +8254,11 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(Immediate(variable->name()));
Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
return;
} else {
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
frame_->Push(Factory::false_value());
}
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
frame_->Push(Factory::false_value());
} else {
// Default: Result of deleting expressions is true.
Load(node->expression()); // may have side-effects


@@ -431,14 +431,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
// The context can be gotten from the function so long as we don't
// optimize functions that need local contexts.
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
// so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<uint32_t>(function->context());
// The context for the bottommost output frame should also agree with the
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = reinterpret_cast<uint32_t>(function->context());
}
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) output_frame->SetRegister(esi.code(), value);
if (FLAG_trace_deopt) {


@@ -3743,8 +3743,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
// but "delete this" is.
ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->is_global()) {
__ push(GlobalObjectOperand());
__ push(Immediate(var->name()));
@@ -3782,17 +3782,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
if (context()->IsTest()) ForwardBailoutToChild(expr);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
context()->Plug(if_false, if_true); // Labels swapped.
if (context()->IsEffect()) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
} else {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
if (context()->IsTest()) ForwardBailoutToChild(expr);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
context()->Plug(if_false, if_true); // Labels swapped.
}
break;
}


@@ -55,7 +55,7 @@ class SafepointGenerator : public PostCallGenerator {
// Ensure that we have enough space in the reloc info to patch
// this with calls when doing deoptimization.
if (ensure_reloc_space_) {
codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
codegen_->EnsureRelocSpaceForDeoptimization();
}
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
@@ -78,6 +78,7 @@ bool LCodeGen::GenerateCode() {
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
GenerateRelocPadding() &&
GenerateSafepointTable();
}
@@ -122,6 +123,16 @@ void LCodeGen::Comment(const char* format, ...) {
}
bool LCodeGen::GenerateRelocPadding() {
int reloc_size = masm()->relocation_writer_size();
while (reloc_size < deoptimization_reloc_size.min_size) {
__ RecordComment(RelocInfo::kFillerCommentString, true);
reloc_size += RelocInfo::kRelocCommentSize;
}
return !is_aborted();
}
bool LCodeGen::GeneratePrologue() {
ASSERT(is_generating());
@@ -163,6 +174,45 @@ bool LCodeGen::GeneratePrologue() {
}
}
// Possibly allocate a local context.
int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
// Argument to NewContext is the function, which is still in edi.
__ push(edi);
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(heap_slots);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kNewContext, 1);
}
RecordSafepoint(Safepoint::kNoDeoptimizationIndex);
// Context is returned in both eax and esi. It replaces the context
// passed to us. It's saved in the stack and kept live in esi.
__ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);
// Copy parameters into context if necessary.
int num_parameters = scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Slot* slot = scope()->parameter(i)->AsSlot();
if (slot != NULL && slot->type() == Slot::CONTEXT) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
__ mov(eax, Operand(ebp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(slot->index());
__ mov(Operand(esi, context_offset), eax);
// Update the write barrier. This clobbers all involved
// registers, so we have to use a third register to avoid
// clobbering esi.
__ mov(ecx, esi);
__ RecordWrite(ecx, context_offset, eax, ebx);
}
}
Comment(";;; End allocate local context");
}
// Trace the call.
if (FLAG_trace) {
// We have not executed any compiled code yet, so esi still holds the
@@ -335,6 +385,22 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
}
void LCodeGen::EnsureRelocSpaceForDeoptimization() {
// Since we patch the reloc info with RUNTIME_ENTRY calls, every patch
// site will take up 2 bytes + any pc-jumps.
// We are conservative and always reserve 6 bytes for the case where a
// simple pc-jump is not enough.
uint32_t pc_delta =
masm()->pc_offset() - deoptimization_reloc_size.last_pc_offset;
if (is_uintn(pc_delta, 6)) {
deoptimization_reloc_size.min_size += 2;
} else {
deoptimization_reloc_size.min_size += 6;
}
deoptimization_reloc_size.last_pc_offset = masm()->pc_offset();
}
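Together with GenerateRelocPadding() above, this amounts to a worst-case byte budget for the reloc stream. A standalone model of the bookkeeping (a sketch, not the V8 code):

struct RelocBudget {
  int min_size;        // bytes the reloc stream must be able to hold
  int last_pc_offset;  // pc offset of the previous recorded patch site

  RelocBudget() : min_size(0), last_pc_offset(0) {}

  // A patched call costs a 2-byte reloc entry when the pc delta since
  // the previous entry fits the short 6-bit encoding, and a conservative
  // 6 bytes otherwise; padding is then emitted up to min_size.
  void AddPatchSite(int pc_offset) {
    int delta = pc_offset - last_pc_offset;
    min_size += (delta < (1 << 6)) ? 2 : 6;
    last_pc_offset = pc_offset;
  }
};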
void LCodeGen::AddToTranslation(Translation* translation,
LOperand* op,
bool is_tagged) {
@@ -382,10 +448,13 @@ void LCodeGen::CallCode(Handle<Code> code,
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
if (!adjusted) {
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
}
__ call(code, mode);
EnsureRelocSpaceForDeoptimization();
RegisterLazyDeoptimization(instr);
// Signal that we don't inline smi code before these stubs in the
@@ -595,6 +664,12 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
}
void LCodeGen::RecordSafepoint(int deoptimization_index) {
LPointerMap empty_pointers(RelocInfo::kNoPosition);
RecordSafepoint(&empty_pointers, deoptimization_index);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index) {
@@ -1836,7 +1911,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label before_push_delta;
__ bind(&before_push_delta);
__ mov(temp, Immediate(delta));
__ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp);
__ StoreToSafepointRegisterSlot(temp, temp);
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ call(stub.GetCode(), RelocInfo::CODE_TARGET);
ASSERT_EQ(kAdditionalDelta,
@@ -1844,8 +1919,7 @@ void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
// Put the result value into the eax slot and restore all registers.
__ mov(Operand(esp, EspIndexForPushAll(eax) * kPointerSize), eax);
__ StoreToSafepointRegisterSlot(eax, eax);
__ PopSafepointRegisters();
}
@@ -2100,13 +2174,13 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
Register external_elements = ToRegister(instr->external_pointer());
Register external_pointer = ToRegister(instr->external_pointer());
Register key = ToRegister(instr->key());
Register result = ToRegister(instr->result());
ASSERT(result.is(external_elements));
ASSERT(result.is(external_pointer));
// Load the result.
__ movzx_b(result, Operand(external_elements, key, times_1, 0));
__ movzx_b(result, Operand(external_pointer, key, times_1, 0));
}
@@ -2301,11 +2375,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (*function == *graph()->info()->closure()) {
__ CallSelf();
} else {
// This is an indirect call and will not be recorded in the reloc info.
// Add a comment to the reloc info in case we need to patch this during
// deoptimization.
__ RecordComment(RelocInfo::kFillerCommentString, true);
__ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
EnsureRelocSpaceForDeoptimization();
}
// Setup deoptimization.
@@ -2360,7 +2431,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
if (!tmp.is(eax)) __ mov(tmp, eax);
// Restore input_reg after call to runtime.
__ mov(input_reg, Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize));
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
__ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
@@ -2368,7 +2439,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
__ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
__ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
__ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
__ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
__ PopSafepointRegisters();
@@ -2493,11 +2564,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
ExternalReference negative_infinity =
ExternalReference::address_of_negative_infinity();
__ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
__ ucomisd(xmm_scratch, input_reg);
DeoptimizeIf(equal, instr->environment());
__ xorpd(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
@@ -2731,6 +2797,25 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
void LCodeGen::DoStorePixelArrayElement(LStorePixelArrayElement* instr) {
Register external_pointer = ToRegister(instr->external_pointer());
Register key = ToRegister(instr->key());
Register value = ToRegister(instr->value());
ASSERT(ToRegister(instr->TempAt(0)).is(eax));
__ mov(eax, value);
{ // Clamp the value to [0..255].
NearLabel done;
__ test(eax, Immediate(0xFFFFFF00));
__ j(zero, &done);
__ setcc(negative, eax); // 1 if negative, 0 if positive.
__ dec_b(eax); // 0 if negative, 255 if positive.
__ bind(&done);
}
__ mov_b(Operand(external_pointer, key, times_1, 0), eax);
}
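The test/setcc/dec_b sequence above is a nearly branchless clamp of an int32 value into [0, 255]. The same computation in scalar C++ (a sketch of what the emitted code does):

#include <stdint.h>

// Values already in [0, 255] pass through untouched. Otherwise setcc
// yields 1 for a negative input and 0 for a too-large positive one,
// and the byte decrement turns that into 0x00 or 0xFF respectively.
static inline uint8_t ClampToUint8(int32_t value) {
  if ((value & 0xFFFFFF00) == 0) return static_cast<uint8_t>(value);
  uint8_t is_negative = value < 0 ? 1 : 0;
  return static_cast<uint8_t>(is_negative - 1);  // 0 or 255
}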
void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register value = ToRegister(instr->value());
Register elements = ToRegister(instr->object());
@@ -2840,19 +2925,20 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
__ test(result, Immediate(kStringRepresentationMask));
__ j(not_zero, deferred->entry());
// Check for 1-byte or 2-byte string.
// Check for ASCII or two-byte string.
__ bind(&flat_string);
STATIC_ASSERT(kAsciiStringTag != 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register.
// Two-byte string.
// Load the two-byte character code into the result register.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
if (instr->index()->IsConstantOperand()) {
__ movzx_w(result,
FieldOperand(string,
SeqTwoByteString::kHeaderSize + 2 * const_index));
SeqTwoByteString::kHeaderSize +
(kUC16Size * const_index)));
} else {
__ movzx_w(result, FieldOperand(string,
index,
@@ -2908,7 +2994,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
__ AbortIfNotSmi(eax);
}
__ SmiUntag(eax);
__ mov(Operand(esp, EspIndexForPushAll(result) * kPointerSize), eax);
__ StoreToSafepointRegisterSlot(result, eax);
__ PopSafepointRegisters();
}
@@ -2976,7 +3062,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// TODO(3095996): Put a valid pointer value in the stack slot where the result
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
__ StoreToSafepointRegisterSlot(reg, Immediate(0));
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
@@ -2988,7 +3074,7 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// number.
__ bind(&done);
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
__ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), reg);
__ StoreToSafepointRegisterSlot(reg, reg);
__ PopSafepointRegisters();
}
@@ -3030,7 +3116,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
__ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), eax);
__ StoreToSafepointRegisterSlot(reg, eax);
__ PopSafepointRegisters();
}


@@ -60,6 +60,7 @@ class LCodeGen BASE_EMBEDDED {
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
deoptimization_reloc_size(),
resolver_(this) {
PopulateDeoptimizationLiteralsWithInlinedFunctions();
}
@@ -102,6 +103,8 @@ class LCodeGen BASE_EMBEDDED {
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
void EnsureRelocSpaceForDeoptimization();
// Declare methods that deal with the individual node types.
#define DECLARE_DO(type) void Do##type(L##type* node);
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
@@ -151,6 +154,9 @@ class LCodeGen BASE_EMBEDDED {
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
// Pad the reloc info to ensure that we have enough space to patch during
// deoptimization.
bool GenerateRelocPadding();
bool GenerateSafepointTable();
void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr,
@@ -204,6 +210,7 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepoint(int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
@@ -251,6 +258,13 @@ class LCodeGen BASE_EMBEDDED {
ZoneList<LDeferredCode*> deferred_;
int osr_pc_offset_;
struct DeoptimizationRelocSize {
int min_size;
int last_pc_offset;
};
DeoptimizationRelocSize deoptimization_reloc_size;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
SafepointTableBuilder safepoints_;


@@ -404,7 +404,7 @@ void LChunk::MarkEmptyBlocks() {
}
void LStoreNamed::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@@ -413,7 +413,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) {
}
void LStoreKeyed::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
stream->Add(" <- ");
value()->PrintTo(stream);
}
void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
stream->Add("] <- ");
value()->PrintTo(stream);
}
void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@@ -1223,7 +1241,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathPowHalf:
return AssignEnvironment(DefineSameAsFirst(result));
return DefineSameAsFirst(result);
default:
UNREACHABLE();
return NULL;
@@ -1840,6 +1858,23 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
LInstruction* LChunkBuilder::DoStorePixelArrayElement(
HStorePixelArrayElement* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* val = UseRegister(instr->value());
LOperand* key = UseRegister(instr->key());
// The generated code requires that the clamped value is in a byte
// register. eax is an arbitrary choice to satisfy this requirement.
LOperand* clamped = FixedTemp(eax);
return new LStorePixelArrayElement(external_pointer, key, val, clamped);
}
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* object = UseFixed(instr->object(), edx);
@@ -1923,8 +1958,8 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
LDeleteProperty* result = new LDeleteProperty(Use(instr->object()),
UseOrConstant(instr->key()));
LDeleteProperty* result =
new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
return MarkAsCall(DefineFixed(result, eax), instr);
}
@@ -1957,8 +1992,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
// There are no real uses of the arguments object (we bail out in all other
// cases).
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
// stack arguments, and any real arguments object use causes a bailout.
// So this value is never used.
return NULL;
}


@@ -42,8 +42,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
V(StoreKeyed) \
V(StoreNamed) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@@ -150,6 +148,7 @@ class LCodeGen;
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StorePixelArrayElement) \
V(StringCharCodeAt) \
V(StringLength) \
V(SubI) \
@@ -1580,34 +1579,23 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
class LStoreNamed: public LTemplateInstruction<0, 2, 1> {
class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamed(LOperand* obj, LOperand* val) {
LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp) {
inputs_[0] = obj;
inputs_[1] = val;
}
DECLARE_INSTRUCTION(StoreNamed)
DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
};
class LStoreNamedField: public LStoreNamed {
public:
LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp)
: LStoreNamed(obj, val) {
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
@@ -1626,6 +1614,8 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
@@ -1633,15 +1623,17 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
};
class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
DECLARE_INSTRUCTION(StoreKeyed)
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
"store-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
@@ -1651,14 +1643,25 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
};
class LStoreKeyedFastElement: public LStoreKeyed {
class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 1> {
public:
LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
: LStoreKeyed(obj, key, val) {}
LStorePixelArrayElement(LOperand* external_pointer,
LOperand* key,
LOperand* val,
LOperand* clamped) {
inputs_[0] = external_pointer;
inputs_[1] = key;
inputs_[2] = val;
temps_[0] = clamped;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
"store-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
"store-pixel-array-element")
DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
};
@@ -1676,6 +1679,8 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
virtual void PrintDataTo(StringStream* stream);
LOperand* context() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
LOperand* key() { return inputs_[2]; }


@@ -1654,6 +1654,28 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
}
// Store the value in register src in the safepoint register stack
// slot for register dst.
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
mov(SafepointRegisterSlot(dst), src);
}
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Immediate src) {
mov(SafepointRegisterSlot(dst), src);
}
void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
mov(dst, SafepointRegisterSlot(src));
}
Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
return Operand(esp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// The registers are pushed starting with the lowest encoding,
// which means that lowest encodings are furthest away from


@@ -143,7 +143,11 @@ class MacroAssembler: public Assembler {
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { pushad(); }
void PopSafepointRegisters() { popad(); }
static int SafepointRegisterStackIndex(int reg_code);
// Store the value in register/immediate src in the safepoint
// register stack slot for register dst.
void StoreToSafepointRegisterSlot(Register dst, Register src);
void StoreToSafepointRegisterSlot(Register dst, Immediate src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
// ---------------------------------------------------------------------------
// JavaScript invokes
@ -667,6 +671,15 @@ class MacroAssembler: public Assembler {
MUST_USE_RESULT MaybeObject* PopHandleScopeHelper(Register saved,
Register scratch,
bool gc_allowed);
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code);
// Needs access to SafepointRegisterStackIndex for optimized frame
// traversal.
friend class OptimizedFrame;
};

View File

@ -2204,8 +2204,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
if (!function->IsBuiltin()) {
// Calling non-builtins with a value as receiver requires boxing.
if (!function->IsBuiltin() && !function_info->strict_mode()) {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
} else {
// Check that the object is a string or a symbol.
@ -2220,8 +2221,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case NUMBER_CHECK: {
if (!function->IsBuiltin()) {
// Calling non-builtins with a value as receiver requires boxing.
if (!function->IsBuiltin() && !function_info->strict_mode()) {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
} else {
Label fast;
@ -2241,8 +2243,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
}
case BOOLEAN_CHECK: {
if (!function->IsBuiltin()) {
// Calling non-builtins with a value as receiver requires boxing.
if (!function->IsBuiltin() && !function_info->strict_mode()) {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
} else {
Label fast;
@ -2586,8 +2589,8 @@ MaybeObject* StoreStubCompiler::CompileStoreGlobal(GlobalObject* object,
// Compute the cell operand to use.
Operand cell_operand = Operand::Cell(Handle<JSGlobalPropertyCell>(cell));
if (Serializer::enabled()) {
__ mov(ecx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
cell_operand = FieldOperand(ecx, JSGlobalPropertyCell::kValueOffset);
__ mov(ebx, Immediate(Handle<JSGlobalPropertyCell>(cell)));
cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset);
}
// Check that the value in the cell is not the hole. If it is, this

deps/v8/src/ic.cc vendored
View File

@ -435,16 +435,25 @@ Object* CallICBase::TryCallAsFunction(Object* object) {
}
void CallICBase::ReceiverToObject(Handle<Object> object) {
HandleScope scope;
Handle<Object> receiver(object);
void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee,
Handle<Object> object) {
if (callee->IsJSFunction()) {
Handle<JSFunction> function = Handle<JSFunction>::cast(callee);
if (function->shared()->strict_mode() || function->IsBuiltin()) {
// Do not wrap receiver for strict mode functions or for builtins.
return;
}
}
// Change the receiver to the result of calling ToObject on it.
const int argc = this->target()->arguments_count();
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
int index = frame->ComputeExpressionsCount() - (argc + 1);
frame->SetExpression(index, *Factory::ToObject(object));
// And only wrap string, number, or boolean receivers.
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
// Change the receiver to the result of calling ToObject on it.
const int argc = this->target()->arguments_count();
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
int index = frame->ComputeExpressionsCount() - (argc + 1);
frame->SetExpression(index, *Factory::ToObject(object));
}
}
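The rules ReceiverToObjectIfRequired encodes boil down to two checks. Here is a simplified standalone model of the same decision (hypothetical types, not V8's):

#include <cassert>

struct Callee { bool is_builtin; bool is_strict; };

// Wrap only when the callee tolerates it and the receiver is a
// primitive that has a wrapper object (string, number, boolean).
bool ShouldWrapReceiver(const Callee& callee, bool receiver_is_primitive) {
  if (callee.is_builtin || callee.is_strict) return false;
  return receiver_is_primitive;
}

int main() {
  assert(!ShouldWrapReceiver({true, false}, true));    // Builtins: never.
  assert(!ShouldWrapReceiver({false, true}, true));    // Strict mode: never.
  assert(ShouldWrapReceiver({false, false}, true));    // Sloppy call on a primitive.
  assert(!ShouldWrapReceiver({false, false}, false));  // Objects stay as-is.
  return 0;
}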
@ -458,10 +467,6 @@ MaybeObject* CallICBase::LoadFunction(State state,
return TypeError("non_object_property_call", object, name);
}
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
ReceiverToObject(object);
}
// Check if the name is trivially convertible to an index and get
// the element if so.
uint32_t index;
@ -505,6 +510,7 @@ MaybeObject* CallICBase::LoadFunction(State state,
object->GetProperty(*object, &lookup, *name, &attr);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
if (lookup.type() == INTERCEPTOR) {
// If the object does not have the requested property, check which
// exception we need to throw.
@ -516,31 +522,37 @@ MaybeObject* CallICBase::LoadFunction(State state,
}
}
ASSERT(result != Heap::the_hole_value());
ASSERT(!result->IsTheHole());
if (result->IsJSFunction()) {
HandleScope scope;
// Wrap result in a handle because ReceiverToObjectIfRequired may allocate
// new object and cause GC.
Handle<Object> result_handle(result);
// Make receiver an object if the callee requires it. Strict mode or builtin
// functions do not wrap the receiver; non-strict functions and objects
// called as functions do.
ReceiverToObjectIfRequired(result_handle, object);
if (result_handle->IsJSFunction()) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Handle stepping into a function if step into is active.
if (Debug::StepInActive()) {
// Protect the result in a handle as the debugger can allocate and might
// cause GC.
HandleScope scope;
Handle<JSFunction> function(JSFunction::cast(result));
Handle<JSFunction> function(JSFunction::cast(*result_handle));
Debug::HandleStepIn(function, object, fp(), false);
return *function;
}
#endif
return result;
return *result_handle;
}
// Try to find a suitable function delegate for the object at hand.
result = TryCallAsFunction(result);
MaybeObject* answer = result;
if (!result->IsJSFunction()) {
answer = TypeError("property_not_function", object, name);
}
return answer;
result_handle = Handle<Object>(TryCallAsFunction(*result_handle));
if (result_handle->IsJSFunction()) return *result_handle;
return TypeError("property_not_function", object, name);
}
@ -565,8 +577,8 @@ bool CallICBase::TryUpdateExtraICState(LookupResult* lookup,
case kStringCharAt:
if (object->IsString()) {
String* string = String::cast(*object);
// Check that there's the right wrapper in the receiver slot.
ASSERT(string == JSValue::cast(args[0])->value());
// Check there's the right string value or wrapper in the receiver slot.
ASSERT(string == args[0] || string == JSValue::cast(args[0])->value());
// If we're in the default (fastest) state and the index is
// out of bounds, update the state to record this fact.
if (*extra_ic_state == DEFAULT_STRING_STUB &&
@ -775,10 +787,6 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
return TypeError("non_object_property_call", object, key);
}
if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
ReceiverToObject(object);
}
if (FLAG_use_ic && state != MEGAMORPHIC && !object->IsAccessCheckNeeded()) {
int argc = target()->arguments_count();
InLoopFlag in_loop = target()->ic_in_loop();
@ -793,17 +801,20 @@ MaybeObject* KeyedCallIC::LoadFunction(State state,
#endif
}
}
Object* result;
{ MaybeObject* maybe_result = Runtime::GetObjectProperty(object, key);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
if (result->IsJSFunction()) return result;
result = TryCallAsFunction(result);
MaybeObject* answer = result;
if (!result->IsJSFunction()) {
answer = TypeError("property_not_function", object, key);
}
return answer;
HandleScope scope;
Handle<Object> result = GetProperty(object, key);
// Make receiver an object if the callee requires it. Strict mode or builtin
// functions do not wrap the receiver; non-strict functions and objects
// called as functions do.
ReceiverToObjectIfRequired(result, object);
if (result->IsJSFunction()) return *result;
result = Handle<Object>(TryCallAsFunction(*result));
if (result->IsJSFunction()) return *result;
return TypeError("property_not_function", object, key);
}

deps/v8/src/ic.h vendored
View File

@ -224,7 +224,7 @@ class CallICBase: public IC {
// Otherwise, it returns the undefined value.
Object* TryCallAsFunction(Object* object);
void ReceiverToObject(Handle<Object> object);
void ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object);
static void Clear(Address address, Code* target);
friend class IC;

View File

@ -478,11 +478,6 @@ void LiveRange::ConvertOperands() {
}
UsePosition* LiveRange::AddUsePosition(LifetimePosition pos) {
return AddUsePosition(pos, CreateAssignedOperand());
}
bool LiveRange::CanCover(LifetimePosition position) const {
if (IsEmpty()) return false;
return Start().Value() <= position.Value() &&
@ -1098,6 +1093,21 @@ void LAllocator::ResolveControlFlow(LiveRange* range,
} else {
ASSERT(pred->end()->SecondSuccessor() == NULL);
gap = GetLastGap(pred);
// We are going to insert a move before the branch instruction.
// Some branch instructions (e.g. loops' back edges)
// can potentially cause a GC so they have a pointer map.
// By inserting a move we essentially create a copy of a
// value which is invisible to PopulatePointerMaps(), because we store
// it into a location different from the operand of a live range
// covering a branch instruction.
// Thus we need to manually record a pointer.
if (HasTaggedValue(range->id())) {
LInstruction* branch = InstructionAt(pred->last_instruction_index());
if (branch->HasPointerMap()) {
branch->pointer_map()->RecordPointer(cur_op);
}
}
}
gap->GetOrCreateParallelMove(LGap::START)->AddMove(pred_op, cur_op);
}

View File

@ -286,7 +286,6 @@ class LiveRange: public ZoneObject {
LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
LiveRange* next() const { return next_; }
bool IsChild() const { return parent() != NULL; }
bool IsParent() const { return parent() == NULL; }
int id() const { return id_; }
bool IsFixed() const { return id_ < 0; }
bool IsEmpty() const { return first_interval() == NULL; }
@ -360,7 +359,6 @@ class LiveRange: public ZoneObject {
void EnsureInterval(LifetimePosition start, LifetimePosition end);
void AddUseInterval(LifetimePosition start, LifetimePosition end);
UsePosition* AddUsePosition(LifetimePosition pos, LOperand* operand);
UsePosition* AddUsePosition(LifetimePosition pos);
// Shorten the most recently added interval by setting a new start.
void ShortenTo(LifetimePosition start);

View File

@ -536,10 +536,12 @@ class ShallowIterator BASE_EMBEDDED {
inline LEnvironment* env() { return env_; }
private:
inline bool ShouldSkip(LOperand* op) {
return op == NULL || op->IsConstantOperand() || op->IsArgument();
}
inline int AdvanceToNext(int start) {
while (start < limit_ &&
(env_->values()->at(start) == NULL ||
env_->values()->at(start)->IsConstantOperand())) {
while (start < limit_ && ShouldSkip(env_->values()->at(start))) {
start++;
}
return start;

View File

@ -300,6 +300,8 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
Append("\\,");
} else if (c == '\\') {
Append("\\\\");
} else if (c == '\"') {
Append("\"\"");
} else {
Append("%lc", c);
}
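The new branch doubles any embedded quote, the standard CSV escape. A minimal standalone version of the whole escaping loop (assuming the field is emitted inside double quotes):

#include <iostream>
#include <string>

std::string EscapeLogField(const std::string& s) {
  std::string out;
  for (size_t i = 0; i < s.size(); i++) {
    char c = s[i];
    if (c == ',') out += "\\,";         // Comma: backslash-escaped.
    else if (c == '\\') out += "\\\\";  // Backslash: doubled with backslash.
    else if (c == '"') out += "\"\"";   // Quote: doubled, CSV style.
    else out += c;
  }
  return out;
}

int main() {
  std::cout << EscapeLogField("a \"b\", c\\d") << "\n";  // a ""b""\, c\d
  return 0;
}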

deps/v8/src/log.cc vendored
View File

@ -147,7 +147,7 @@ bool Profiler::paused_ = false;
// StackTracer implementation
//
void StackTracer::Trace(TickSample* sample) {
sample->function = NULL;
sample->tos = NULL;
sample->frames_count = 0;
// Avoid collecting traces while doing GC.
@ -159,15 +159,9 @@ void StackTracer::Trace(TickSample* sample) {
return;
}
const Address function_address =
sample->fp + JavaScriptFrameConstants::kFunctionOffset;
if (SafeStackFrameIterator::IsWithinBounds(sample->sp, js_entry_sp,
function_address)) {
Object* object = Memory::Object_at(function_address);
if (object->IsHeapObject()) {
sample->function = HeapObject::cast(object)->address();
}
}
// Sample potential return address value for frameless invocation of
// stubs (we'll figure out later if this value makes sense).
sample->tos = Memory::Address_at(sample->sp);
int i = 0;
const Address callback = Top::external_callback();
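The sampled 'tos' value is simply the word the stack pointer points at; for a stub entered by a bare call instruction, that word is the return address. A tiny illustration of the dereference (addresses made up):

#include <cstdio>
#include <cstdint>

int main() {
  // Fake a two-word stack with a return-address-like value on top.
  uintptr_t stack[2] = { 0xBADC0DE, 0 };
  uintptr_t* sp = stack;  // The sampled stack pointer.
  uintptr_t tos = *sp;    // Like sample->tos = Memory::Address_at(sample->sp).
  std::printf("tos = %p\n", reinterpret_cast<void*>(tos));
  return 0;
}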
@ -181,10 +175,7 @@ void StackTracer::Trace(TickSample* sample) {
SafeStackTraceFrameIterator it(sample->fp, sample->sp,
sample->sp, js_entry_sp);
while (!it.done() && i < TickSample::kMaxFramesCount) {
Object* object = it.frame()->function_slot_object();
if (object->IsHeapObject()) {
sample->stack[i++] = HeapObject::cast(object)->address();
}
sample->stack[i++] = it.frame()->pc();
it.Advance();
}
sample->frames_count = i;
@ -710,17 +701,6 @@ void Logger::SetterCallbackEvent(String* name, Address entry_point) {
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static const char* ComputeMarker(Code* code) {
switch (code->kind()) {
case Code::FUNCTION: return code->optimizable() ? "~" : "";
case Code::OPTIMIZED_FUNCTION: return "*";
default: return "";
}
}
#endif
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
const char* comment) {
@ -731,7 +711,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s", code->ExecutableSize(), ComputeMarker(code));
msg.Append(",%d,\"", code->ExecutableSize());
for (const char* p = comment; *p != '\0'; p++) {
if (*p == '"') {
msg.Append('\\');
@ -746,9 +726,40 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (name != NULL) {
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
CodeCreateEvent(tag, code, *str);
} else {
CodeCreateEvent(tag, code, "");
}
#endif
}
#ifdef ENABLE_LOGGING_AND_PROFILING
// ComputeMarker must only be used when SharedFunctionInfo is known.
static const char* ComputeMarker(Code* code) {
switch (code->kind()) {
case Code::FUNCTION: return code->optimizable() ? "~" : "";
case Code::OPTIMIZED_FUNCTION: return "*";
default: return "";
}
}
#endif
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* name) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
if (code == Builtins::builtin(Builtins::LazyCompile)) return;
LogMessageBuilder msg;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@ -756,7 +767,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s%s\"", code->ExecutableSize(), ComputeMarker(code), *str);
msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
msg.AppendAddress(shared->address());
msg.Append(",%s", ComputeMarker(code));
LowLevelCodeCreateEvent(code, &msg);
msg.Append('\n');
msg.WriteToLogFile();
@ -764,26 +777,31 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
}
// Although it is possible to extract the source and line from
// the SharedFunctionInfo object, we leave that to the caller
// to keep logging functions free from heap allocations.
void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code, String* name,
Code* code,
SharedFunctionInfo* shared,
String* source, int line) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
SmartPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> name =
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartPointer<char> sourcestr =
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s%s %s:%d\"",
msg.Append(",%d,\"%s %s:%d\",",
code->ExecutableSize(),
ComputeMarker(code),
*str,
*name,
*sourcestr,
line);
msg.AppendAddress(shared->address());
msg.Append(",%s", ComputeMarker(code));
LowLevelCodeCreateEvent(code, &msg);
msg.Append('\n');
msg.WriteToLogFile();
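With these Append calls, a code-creation line now carries the quoted name, then the SharedFunctionInfo address, then the optimization marker. An illustrative line in the new format (all values made up):

code-creation,LazyCompile,0x2e8f6c20,482,"foo bar.js:7",0x44556688,~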
@ -863,42 +881,9 @@ void Logger::SnapshotPositionEvent(Address addr, int pos) {
}
void Logger::FunctionCreateEvent(JSFunction* function) {
void Logger::SFIMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
// This function can be called from GC iterators (during Scavenge,
// MC, and MS), so marking bits can be set on objects. That's
// why unchecked accessors are used here.
if (!Log::IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg;
msg.Append("%s,", kLogEventsNames[FUNCTION_CREATION_EVENT]);
msg.AppendAddress(function->address());
msg.Append(',');
msg.AppendAddress(function->unchecked_code()->address());
msg.Append('\n');
msg.WriteToLogFile();
#endif
}
void Logger::FunctionCreateEventFromMove(JSFunction* function) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile)) {
FunctionCreateEvent(function);
}
#endif
}
void Logger::FunctionMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING
MoveEventInternal(FUNCTION_MOVE_EVENT, from, to);
#endif
}
void Logger::FunctionDeleteEvent(Address from) {
#ifdef ENABLE_LOGGING_AND_PROFILING
DeleteEventInternal(FUNCTION_DELETE_EVENT, from);
MoveEventInternal(SFI_MOVE_EVENT, from, to);
#endif
}
@ -1118,7 +1103,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
msg.Append(',');
msg.AppendAddress(sample->sp);
msg.Append(',');
msg.AppendAddress(sample->function);
msg.AppendAddress(sample->tos);
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
@ -1187,7 +1172,6 @@ void Logger::ResumeProfiler(int flags, int tag) {
LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true;
LogCompiledFunctions();
LogFunctionObjects();
LogAccessorCallbacks();
if (!FLAG_sliding_state_window && !ticker_->IsActive()) {
ticker_->Start();
@ -1388,10 +1372,9 @@ void Logger::LogCompiledFunctions() {
// During iteration, there can be heap allocation due to
// GetScriptLineNumber call.
for (int i = 0; i < compiled_funcs_count; ++i) {
if (*code_objects[i] == Builtins::builtin(Builtins::LazyCompile)) continue;
Handle<SharedFunctionInfo> shared = sfis[i];
Handle<String> name(String::cast(shared->name()));
Handle<String> func_name(name->length() > 0 ?
*name : shared->inferred_name());
Handle<String> func_name(shared->DebugName());
if (shared->script()->IsScript()) {
Handle<Script> script(Script::cast(shared->script()));
if (script->name()->IsString()) {
@ -1400,18 +1383,18 @@ void Logger::LogCompiledFunctions() {
if (line_num > 0) {
PROFILE(CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
*code_objects[i], *func_name,
*code_objects[i], *shared,
*script_name, line_num + 1));
} else {
// Can't distinguish eval and script here, so always use Script.
PROFILE(CodeCreateEvent(
Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*code_objects[i], *script_name));
*code_objects[i], *shared, *script_name));
}
} else {
PROFILE(CodeCreateEvent(
Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script),
*code_objects[i], *func_name));
*code_objects[i], *shared, *func_name));
}
} else if (shared->IsApiFunction()) {
// API function.
@ -1425,24 +1408,12 @@ void Logger::LogCompiledFunctions() {
}
} else {
PROFILE(CodeCreateEvent(
Logger::LAZY_COMPILE_TAG, *code_objects[i], *func_name));
Logger::LAZY_COMPILE_TAG, *code_objects[i], *shared, *func_name));
}
}
}
void Logger::LogFunctionObjects() {
AssertNoAllocation no_alloc;
HeapIterator iterator;
for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
if (!obj->IsJSFunction()) continue;
JSFunction* jsf = JSFunction::cast(obj);
if (!jsf->is_compiled()) continue;
PROFILE(FunctionCreateEvent(jsf));
}
}
void Logger::LogAccessorCallbacks() {
AssertNoAllocation no_alloc;
HeapIterator iterator;

deps/v8/src/log.h vendored
View File

@ -91,9 +91,7 @@ class LogMessageBuilder;
V(CODE_MOVE_EVENT, "code-move") \
V(CODE_DELETE_EVENT, "code-delete") \
V(CODE_MOVING_GC, "code-moving-gc") \
V(FUNCTION_CREATION_EVENT, "function-creation") \
V(FUNCTION_MOVE_EVENT, "function-move") \
V(FUNCTION_DELETE_EVENT, "function-delete") \
V(SFI_MOVE_EVENT, "sfi-move") \
V(SNAPSHOT_POSITION_EVENT, "snapshot-pos") \
V(TICK_EVENT, "tick") \
V(REPEAT_META_EVENT, "repeat") \
@ -205,8 +203,15 @@ class Logger {
// Emits a code create event.
static void CodeCreateEvent(LogEventsAndTags tag,
Code* code, const char* source);
static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name);
static void CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name,
static void CodeCreateEvent(LogEventsAndTags tag,
Code* code, String* name);
static void CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* name);
static void CodeCreateEvent(LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* source, int line);
static void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count);
static void CodeMovingGCEvent();
@ -216,13 +221,8 @@ class Logger {
static void CodeMoveEvent(Address from, Address to);
// Emits a code delete event.
static void CodeDeleteEvent(Address from);
// Emits a function object create event.
static void FunctionCreateEvent(JSFunction* function);
static void FunctionCreateEventFromMove(JSFunction* function);
// Emits a function move event.
static void FunctionMoveEvent(Address from, Address to);
// Emits a function delete event.
static void FunctionDeleteEvent(Address from);
static void SFIMoveEvent(Address from, Address to);
static void SnapshotPositionEvent(Address addr, int pos);
@ -273,8 +273,6 @@ class Logger {
// Logs all compiled functions found in the heap.
static void LogCompiledFunctions();
// Logs all compiled JSFunction objects found in the heap.
static void LogFunctionObjects();
// Logs all accessor callbacks found in the heap.
static void LogAccessorCallbacks();
// Used for logging stubs found in the snapshot.

View File

@ -2819,9 +2819,8 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
if (copied_to->IsSharedFunctionInfo()) {
PROFILE(SFIMoveEvent(old_addr, new_addr));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
@ -2912,9 +2911,8 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
#endif
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
if (copied_to->IsJSFunction()) {
PROFILE(FunctionMoveEvent(old_addr, new_addr));
PROFILE(FunctionCreateEventFromMove(JSFunction::cast(copied_to)));
if (copied_to->IsSharedFunctionInfo()) {
PROFILE(SFIMoveEvent(old_addr, new_addr));
}
HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
@ -2931,8 +2929,6 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
PROFILE(CodeDeleteEvent(obj->address()));
} else if (obj->IsJSFunction()) {
PROFILE(FunctionDeleteEvent(obj->address()));
}
#endif
}

View File

@ -2813,6 +2813,12 @@ bool JSObject::ReferencesObject(Object* obj) {
MaybeObject* JSObject::PreventExtensions() {
if (IsAccessCheckNeeded() &&
!Top::MayNamedAccess(this, Heap::undefined_value(), v8::ACCESS_KEYS)) {
Top::ReportFailedAccessCheck(this, v8::ACCESS_KEYS);
return Heap::false_value();
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
if (proto->IsNull()) return this;
@ -6660,7 +6666,6 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
break;
}
case PIXEL_ELEMENTS: {
// TODO(iposva): Add testcase.
PixelArray* pixels = PixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) {
return true;
@ -6674,7 +6679,6 @@ bool JSObject::HasElementPostInterceptor(JSObject* receiver, uint32_t index) {
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
// TODO(kbr): Add testcase.
ExternalArray* array = ExternalArray::cast(elements());
if (index < static_cast<uint32_t>(array->length())) {
return true;
@ -7265,11 +7269,7 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
}
break;
}
case PIXEL_ELEMENTS: {
// TODO(iposva): Add testcase and implement.
UNIMPLEMENTED();
break;
}
case PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
@ -7277,8 +7277,8 @@ MaybeObject* JSObject::GetElementPostInterceptor(Object* receiver,
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
// TODO(kbr): Add testcase and implement.
UNIMPLEMENTED();
MaybeObject* value = GetExternalElement(index);
if (!value->ToObjectUnchecked()->IsUndefined()) return value;
break;
}
case DICTIONARY_ELEMENTS: {
@ -7366,6 +7366,46 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
}
break;
}
case PIXEL_ELEMENTS:
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
MaybeObject* value = GetExternalElement(index);
if (!value->ToObjectUnchecked()->IsUndefined()) return value;
break;
}
case DICTIONARY_ELEMENTS: {
NumberDictionary* dictionary = element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
return GetElementWithCallback(receiver,
element,
index,
this);
}
return element;
}
break;
}
}
Object* pt = GetPrototype();
if (pt == Heap::null_value()) return Heap::undefined_value();
return pt->GetElementWithReceiver(receiver, index);
}
MaybeObject* JSObject::GetExternalElement(uint32_t index) {
// Get element works for both JSObject and JSArray since
// JSArray::length cannot change.
switch (GetElementsKind()) {
case PIXEL_ELEMENTS: {
PixelArray* pixels = PixelArray::cast(elements());
if (index < static_cast<uint32_t>(pixels->length())) {
@ -7433,27 +7473,12 @@ MaybeObject* JSObject::GetElementWithReceiver(Object* receiver,
}
break;
}
case DICTIONARY_ELEMENTS: {
NumberDictionary* dictionary = element_dictionary();
int entry = dictionary->FindEntry(index);
if (entry != NumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS) {
return GetElementWithCallback(receiver,
element,
index,
this);
}
return element;
}
case FAST_ELEMENTS:
case DICTIONARY_ELEMENTS:
UNREACHABLE();
break;
}
}
Object* pt = GetPrototype();
if (pt == Heap::null_value()) return Heap::undefined_value();
return pt->GetElementWithReceiver(receiver, index);
return Heap::undefined_value();
}

View File

@ -1549,6 +1549,11 @@ class JSObject: public HeapObject {
MaybeObject* GetElementWithReceiver(Object* receiver, uint32_t index);
MaybeObject* GetElementWithInterceptor(Object* receiver, uint32_t index);
// Get external element value at index if there is one and undefined
// otherwise. Can return a failure if allocation of a heap number
// failed.
MaybeObject* GetExternalElement(uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElementsCapacityAndLength(int capacity,
int length);
MUST_USE_RESULT MaybeObject* SetSlowElements(Object* length);

View File

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2006-2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -71,7 +71,7 @@ void OS::Setup() {
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // Nothing special about cygwin
return 0; // Nothing special about Cygwin.
}
@ -209,7 +209,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);
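The mode change matters because "w+" truncates an existing file, while "r+" opens it for update and keeps the contents, which is what mapping an existing file requires. A quick standalone demonstration:

#include <cstdio>

int main() {
  FILE* f = std::fopen("demo.txt", "w");
  if (f == NULL) return 1;
  std::fputs("payload", f);
  std::fclose(f);

  f = std::fopen("demo.txt", "r+");  // "w+" here would wipe the file.
  if (f == NULL) return 1;
  std::fseek(f, 0, SEEK_END);
  std::printf("size after r+: %ld\n", std::ftell(f));  // Prints 7.
  std::fclose(f);
  return 0;
}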
@ -304,12 +304,12 @@ void OS::LogSharedLibraryAddresses() {
void OS::SignalCodeMovingGC() {
// Nothing to do on Cygwin
// Nothing to do on Cygwin.
}
int OS::StackWalk(Vector<OS::StackFrame> frames) {
// Not supported on Cygwin
// Not supported on Cygwin.
return 0;
}
@ -443,17 +443,36 @@ void Thread::Join() {
}
static inline Thread::LocalStorageKey PthreadKeyToLocalKey(
pthread_key_t pthread_key) {
// We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
// because pthread_key_t is a pointer type on Cygwin. This will probably not
// work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
return static_cast<Thread::LocalStorageKey>(ptr_key);
}
static inline pthread_key_t LocalKeyToPthreadKey(
Thread::LocalStorageKey local_key) {
STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
intptr_t ptr_key = static_cast<intptr_t>(local_key);
return reinterpret_cast<pthread_key_t>(ptr_key);
}
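A self-contained sketch of the two-step cast used above, with a plain pointer standing in for Cygwin's pointer-typed pthread_key_t; going through intptr_t avoids the direct pointer-to-enum conversion the compiler would reject:

#include <cassert>
#include <cstdint>

enum LocalStorageKey : intptr_t {};
typedef void* FakePthreadKey;  // pthread_key_t is a pointer on Cygwin.

LocalStorageKey ToLocalKey(FakePthreadKey key) {
  intptr_t ptr_key = reinterpret_cast<intptr_t>(key);
  return static_cast<LocalStorageKey>(ptr_key);
}

FakePthreadKey ToPthreadKey(LocalStorageKey key) {
  intptr_t ptr_key = static_cast<intptr_t>(key);
  return reinterpret_cast<FakePthreadKey>(ptr_key);
}

int main() {
  int dummy = 0;
  FakePthreadKey raw = &dummy;
  assert(ToPthreadKey(ToLocalKey(raw)) == raw);  // Round trip is lossless.
  return 0;
}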
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
pthread_key_t key;
int result = pthread_key_create(&key, NULL);
USE(result);
ASSERT(result == 0);
return static_cast<LocalStorageKey>(key);
return PthreadKeyToLocalKey(key);
}
void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
int result = pthread_key_delete(pthread_key);
USE(result);
ASSERT(result == 0);
@ -461,13 +480,13 @@ void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
void* Thread::GetThreadLocal(LocalStorageKey key) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
return pthread_getspecific(pthread_key);
}
void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
pthread_key_t pthread_key = static_cast<pthread_key_t>(key);
pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
pthread_setspecific(pthread_key, value);
}
@ -594,7 +613,7 @@ Semaphore* OS::CreateSemaphore(int count) {
// ----------------------------------------------------------------------------
// Cygwin profiler support.
//
// On cygwin we use the same sampler implementation as on win32
// On Cygwin we use the same sampler implementation as on win32.
class Sampler::PlatformData : public Malloced {
public:
@ -698,8 +717,7 @@ void Sampler::Start() {
// Start sampler thread.
DWORD tid;
SetActive(true);
data_->sampler_thread_ = CreateThread(NULL, 0, SamplerEntry, data_, 0,
&tid);
data_->sampler_thread_ = CreateThread(NULL, 0, SamplerEntry, data_, 0, &tid);
// Set thread to high priority to increase sampling accuracy.
SetThreadPriority(data_->sampler_thread_, THREAD_PRIORITY_TIME_CRITICAL);
}

View File

@ -224,7 +224,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);

View File

@ -327,7 +327,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);

View File

@ -205,7 +205,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);

View File

@ -222,7 +222,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);

View File

@ -235,7 +235,7 @@ class PosixMemoryMappedFile : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
FILE* file = fopen(name, "w+");
FILE* file = fopen(name, "r+");
if (file == NULL) return NULL;
fseek(file, 0, SEEK_END);

View File

@ -939,7 +939,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
// Open a physical file
HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
if (file == NULL) return NULL;
if (file == INVALID_HANDLE_VALUE) return NULL;
int size = static_cast<int>(GetFileSize(file, NULL));

View File

@ -383,14 +383,10 @@ class Thread: public ThreadHandle {
// LOCAL_STORAGE_KEY_MIN_VALUE and LOCAL_STORAGE_KEY_MAX_VALUE are specified
// to ensure that enumeration type has correct value range (see Issue 830 for
// more details).
#ifdef __CYGWIN__
typedef void* LocalStorageKey;
#else
enum LocalStorageKey {
LOCAL_STORAGE_KEY_MIN_VALUE = kMinInt,
LOCAL_STORAGE_KEY_MAX_VALUE = kMaxInt
};
#endif
// Create new thread.
Thread();
@ -571,13 +567,13 @@ class TickSample {
pc(NULL),
sp(NULL),
fp(NULL),
function(NULL),
tos(NULL),
frames_count(0) {}
StateTag state; // The state of the VM.
Address pc; // Instruction pointer.
Address sp; // Stack pointer.
Address fp; // Frame pointer.
Address function; // The last called JS function.
Address pc; // Instruction pointer.
Address sp; // Stack pointer.
Address fp; // Frame pointer.
Address tos; // Top stack value (*sp).
static const int kMaxFramesCount = 64;
Address stack[kMaxFramesCount]; // Call stack.
int frames_count; // Number of captured frames.

View File

@ -45,16 +45,6 @@ const char* StringsStorage::GetFunctionName(const char* name) {
}
CodeEntry::CodeEntry(int security_token_id)
: tag_(Logger::FUNCTION_TAG),
name_prefix_(kEmptyNamePrefix),
name_(""),
resource_name_(""),
line_number_(0),
security_token_id_(security_token_id) {
}
CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix,
const char* name,
@ -66,6 +56,7 @@ CodeEntry::CodeEntry(Logger::LogEventsAndTags tag,
name_(name),
resource_name_(resource_name),
line_number_(line_number),
shared_id_(0),
security_token_id_(security_token_id) {
}

View File

@ -156,13 +156,18 @@ void CodeEntry::CopyData(const CodeEntry& source) {
uint32_t CodeEntry::GetCallUid() const {
uint32_t hash = ComputeIntegerHash(tag_);
hash ^= ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
hash ^= ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
hash ^= ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
hash ^= ComputeIntegerHash(line_number_);
if (shared_id_ != 0) {
hash ^= ComputeIntegerHash(
static_cast<uint32_t>(shared_id_));
} else {
hash ^= ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)));
hash ^= ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)));
hash ^= ComputeIntegerHash(
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)));
hash ^= ComputeIntegerHash(line_number_);
}
return hash;
}
@ -170,10 +175,12 @@ uint32_t CodeEntry::GetCallUid() const {
bool CodeEntry::IsSameAs(CodeEntry* entry) const {
return this == entry
|| (tag_ == entry->tag_
&& name_prefix_ == entry->name_prefix_
&& name_ == entry->name_
&& resource_name_ == entry->resource_name_
&& line_number_ == entry->line_number_);
&& shared_id_ == entry->shared_id_
&& (shared_id_ != 0
|| (name_prefix_ == entry->name_prefix_
&& name_ == entry->name_
&& resource_name_ == entry->resource_name_
&& line_number_ == entry->line_number_)));
}
@ -458,23 +465,12 @@ void CpuProfile::Print() {
}
CodeEntry* const CodeMap::kSfiCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;
const CodeMap::CodeTreeConfig::Value CodeMap::CodeTreeConfig::kNoValue =
CodeMap::CodeEntryInfo(NULL, 0);
void CodeMap::AddAlias(Address start, CodeEntry* entry, Address code_start) {
CodeTree::Locator locator;
if (tree_.Find(code_start, &locator)) {
const CodeEntryInfo& code_info = locator.value();
if (tree_.Insert(start, &locator)) {
entry->CopyData(*code_info.entry);
locator.set_value(CodeEntryInfo(entry, code_info.size));
}
}
}
CodeEntry* CodeMap::FindEntry(Address addr) {
CodeTree::Locator locator;
if (tree_.FindGreatestLessThan(addr, &locator)) {
@ -487,6 +483,22 @@ CodeEntry* CodeMap::FindEntry(Address addr) {
}
int CodeMap::GetSFITag(Address addr) {
CodeTree::Locator locator;
// For SFI entries, 'size' field is used to store their IDs.
if (tree_.Find(addr, &locator)) {
const CodeEntryInfo& entry = locator.value();
ASSERT(entry.entry == kSfiCodeEntry);
return entry.size;
} else {
tree_.Insert(addr, &locator);
int tag = next_sfi_tag_++;
locator.set_value(CodeEntryInfo(kSfiCodeEntry, tag));
return tag;
}
}
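GetSFITag hands out a fresh tag the first time an address is seen and the same tag thereafter, reusing the entry's size field as storage. The same behavior in a simplified standalone form (std::map instead of V8's splay tree):

#include <cassert>
#include <map>

class SfiTagger {
 public:
  SfiTagger() : next_tag_(1) {}
  int GetTag(const void* addr) {
    std::map<const void*, int>::iterator it = tags_.find(addr);
    if (it != tags_.end()) return it->second;
    int tag = next_tag_++;
    tags_[addr] = tag;
    return tag;
  }
 private:
  std::map<const void*, int> tags_;
  int next_tag_;
};

int main() {
  SfiTagger tagger;
  int a = 0, b = 0;
  assert(tagger.GetTag(&a) == 1);  // First sight: new tag.
  assert(tagger.GetTag(&b) == 2);
  assert(tagger.GetTag(&a) == 1);  // Repeat lookups are stable.
  return 0;
}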
void CodeMap::CodeTreePrinter::Call(
const Address& key, const CodeMap::CodeEntryInfo& value) {
OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
@ -715,13 +727,6 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag,
}
CodeEntry* CpuProfilesCollection::NewCodeEntry(int security_token_id) {
CodeEntry* entry = new CodeEntry(security_token_id);
code_entries_.Add(entry);
return entry;
}
void CpuProfilesCollection::AddPathToCurrentProfiles(
const Vector<CodeEntry*>& path) {
// As starting / stopping profiles is rare relatively to this
@ -784,19 +789,10 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
if (sample.pc != NULL) {
*entry++ = code_map_.FindEntry(sample.pc);
if (sample.function != NULL) {
*entry = code_map_.FindEntry(sample.function);
if (sample.tos != NULL) {
*entry = code_map_.FindEntry(sample.tos);
if (*entry != NULL && !(*entry)->is_js_function()) {
*entry = NULL;
} else {
CodeEntry* pc_entry = *entries.start();
if (pc_entry == NULL) {
*entry = NULL;
} else if (pc_entry->is_js_function()) {
// Use function entry in favor of pc entry, as function
// entry has security token.
*entries.start() = NULL;
}
}
entry++;
}

View File

@ -88,7 +88,6 @@ class StringsStorage {
class CodeEntry {
public:
explicit INLINE(CodeEntry(int security_token_id));
// CodeEntry doesn't own name strings, just references them.
INLINE(CodeEntry(Logger::LogEventsAndTags tag,
const char* name_prefix,
@ -103,6 +102,8 @@ class CodeEntry {
INLINE(const char* name() const) { return name_; }
INLINE(const char* resource_name() const) { return resource_name_; }
INLINE(int line_number() const) { return line_number_; }
INLINE(int shared_id() const) { return shared_id_; }
INLINE(void set_shared_id(int shared_id)) { shared_id_ = shared_id; }
INLINE(int security_token_id() const) { return security_token_id_; }
INLINE(static bool is_js_function_tag(Logger::LogEventsAndTags tag));
@ -119,6 +120,7 @@ class CodeEntry {
const char* name_;
const char* resource_name_;
int line_number_;
int shared_id_;
int security_token_id_;
DISALLOW_COPY_AND_ASSIGN(CodeEntry);
@ -234,12 +236,12 @@ class CpuProfile {
class CodeMap {
public:
CodeMap() { }
CodeMap() : next_sfi_tag_(1) { }
INLINE(void AddCode(Address addr, CodeEntry* entry, unsigned size));
INLINE(void MoveCode(Address from, Address to));
INLINE(void DeleteCode(Address addr));
void AddAlias(Address start, CodeEntry* entry, Address code_start);
CodeEntry* FindEntry(Address addr);
int GetSFITag(Address addr);
void Print();
@ -267,7 +269,11 @@ class CodeMap {
void Call(const Address& key, const CodeEntryInfo& value);
};
// Fake CodeEntry pointer to distinguish SFI entries.
static CodeEntry* const kSfiCodeEntry;
CodeTree tree_;
int next_sfi_tag_;
DISALLOW_COPY_AND_ASSIGN(CodeMap);
};

View File

@ -1051,6 +1051,12 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
// Fall-through and introduce the absent property by using
// SetProperty.
} else {
// For const properties, we treat a callback with this name
// even in the prototype as a conflicting declaration.
if (is_const_property && (lookup.type() == CALLBACKS)) {
return ThrowRedeclarationError("const", name);
}
// Otherwise, we check for locally conflicting declarations.
if (is_local && (is_read_only || is_const_property)) {
const char* type = (is_read_only) ? "const" : "var";
return ThrowRedeclarationError(type, name);
@ -1076,29 +1082,34 @@ static MaybeObject* Runtime_DeclareGlobals(Arguments args) {
? static_cast<PropertyAttributes>(base | READ_ONLY)
: base;
if (lookup.IsProperty()) {
// There's a local property that we need to overwrite because
// we're either declaring a function or there's an interceptor
// that claims the property is absent.
// There's a local property that we need to overwrite because
// we're either declaring a function or there's an interceptor
// that claims the property is absent.
//
// Check for conflicting re-declarations. We cannot have
// conflicting types in case of intercepted properties because
// they are absent.
if (lookup.IsProperty() &&
(lookup.type() != INTERCEPTOR) &&
(lookup.IsReadOnly() || is_const_property)) {
const char* type = (lookup.IsReadOnly()) ? "const" : "var";
return ThrowRedeclarationError(type, name);
}
// Check for conflicting re-declarations. We cannot have
// conflicting types in case of intercepted properties because
// they are absent.
if (lookup.type() != INTERCEPTOR &&
(lookup.IsReadOnly() || is_const_property)) {
const char* type = (lookup.IsReadOnly()) ? "const" : "var";
return ThrowRedeclarationError(type, name);
}
RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, attributes));
// Safari does not allow the invocation of callback setters for
// function declarations. To mimic this behavior, we do not allow
// the invocation of setters for function values. This makes a
// difference for global functions with the same names as event
// handlers such as "function onload() {}". Firefox does call the
// onload setter in those case and Safari does not. We follow
// Safari for compatibility.
if (value->IsJSFunction()) {
RETURN_IF_EMPTY_HANDLE(SetLocalPropertyIgnoreAttributes(global,
name,
value,
attributes));
} else {
// If a property with this name does not already exist on the
// global object add the property locally. We take special
// precautions to always add it as a local property even in case
// of callbacks in the prototype chain (this rules out using
// SetProperty). Also, we must use the handle-based version to
// avoid GC issues.
RETURN_IF_EMPTY_HANDLE(
SetLocalPropertyIgnoreAttributes(global, name, value, attributes));
RETURN_IF_EMPTY_HANDLE(SetProperty(global, name, value, attributes));
}
}
@ -1186,6 +1197,20 @@ static MaybeObject* Runtime_DeclareContextSlot(Arguments args) {
ASSERT(!context_ext->HasLocalProperty(*name));
Handle<Object> value(Heap::undefined_value());
if (*initial_value != NULL) value = initial_value;
// Declaring a const context slot is a conflicting declaration if
// there is a callback with that name in a prototype. It is
// allowed to introduce const variables in
// JSContextExtensionObjects. They are treated specially in
// SetProperty and no setters are invoked for those since they are
// not real JSObjects.
if (initial_value->IsTheHole() &&
!context_ext->IsJSContextExtensionObject()) {
LookupResult lookup;
context_ext->Lookup(*name, &lookup);
if (lookup.IsProperty() && (lookup.type() == CALLBACKS)) {
return ThrowRedeclarationError("const", name);
}
}
RETURN_IF_EMPTY_HANDLE(SetProperty(context_ext, name, value, mode));
}
@ -1212,11 +1237,7 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
// there, there is a property with this name in the prototype chain.
// We follow Safari and Firefox behavior and only set the property
// locally if there is an explicit initialization value that we have
// to assign to the property. When adding the property we take
// special precautions to always add it as a local property even in
// case of callbacks in the prototype chain (this rules out using
// SetProperty). We have SetLocalPropertyIgnoreAttributes for
// this.
// to assign to the property.
// Note that objects can have hidden prototypes, so we need to traverse
// the whole chain of hidden prototypes to do a 'local' lookup.
JSObject* real_holder = global;
@ -1277,11 +1298,7 @@ static MaybeObject* Runtime_InitializeVarGlobal(Arguments args) {
}
global = Top::context()->global();
if (assign) {
return global->SetLocalPropertyIgnoreAttributes(*name,
args[1],
attributes);
}
if (assign) return global->SetProperty(*name, args[1], attributes);
return Heap::undefined_value();
}
@ -3673,6 +3690,8 @@ static MaybeObject* Runtime_DefineOrRedefineDataProperty(Arguments args) {
is_element) {
// Normalize the elements to enable attributes on the property.
if (js_object->IsJSGlobalProxy()) {
// We do not need to do access checks here since they have already
// been performed by the call to GetOwnProperty.
Handle<Object> proto(js_object->GetPrototype());
// If proxy is detached, ignore the assignment. Alternatively,
// we could throw an exception.
@ -6927,6 +6946,7 @@ static MaybeObject* Runtime_NewObject(Arguments args) {
bool first_allocation = !shared->live_objects_may_exist();
Handle<JSObject> result = Factory::NewJSObject(function);
RETURN_IF_EMPTY_HANDLE(result);
// Delay setting the stub if inobject slack tracking is in progress.
if (first_allocation && !shared->IsInobjectSlackTrackingInProgress()) {
TrySettingInlineConstructStub(function);

deps/v8/src/top.cc vendored
View File

@ -735,9 +735,8 @@ Failure* Top::ReThrow(MaybeObject* exception, MessageLocation* location) {
bool can_be_caught_externally = false;
ShouldReportException(&can_be_caught_externally,
is_catchable_by_javascript(exception));
if (can_be_caught_externally) {
thread_local_.catcher_ = try_catch_handler();
}
thread_local_.catcher_ = can_be_caught_externally ?
try_catch_handler() : NULL;
// Set the exception being re-thrown.
set_pending_exception(exception);
@ -913,9 +912,10 @@ void Top::DoThrow(MaybeObject* exception,
}
}
if (can_be_caught_externally) {
thread_local_.catcher_ = try_catch_handler();
}
// Do not forget to clean catcher_ if currently thrown exception cannot
// be caught. If necessary, ReThrow will update the catcher.
thread_local_.catcher_ = can_be_caught_externally ?
try_catch_handler() : NULL;
// NOTE: Notifying the debugger or generating the message
// may have caused new exceptions. For now, we just ignore

deps/v8/src/v8.cc vendored
View File

@ -54,7 +54,12 @@ bool V8::Initialize(Deserializer* des) {
if (has_been_disposed_ || has_fatal_error_) return false;
if (IsRunning()) return true;
#if defined(V8_TARGET_ARCH_ARM) && !defined(USE_ARM_EABI)
use_crankshaft_ = false;
#else
use_crankshaft_ = FLAG_crankshaft;
#endif
// Peephole optimization might interfere with deoptimization.
FLAG_peephole_optimization = !use_crankshaft_;
is_running_ = true;

View File

@ -92,7 +92,7 @@ function GlobalIsFinite(number) {
// ECMA-262 - 15.1.2.2
function GlobalParseInt(string, radix) {
if (IS_UNDEFINED(radix)) {
if (IS_UNDEFINED(radix) || radix === 10 || radix === 0) {
// Some people use parseInt instead of Math.floor. This
// optimization makes parseInt on a Smi 12 times faster (60ns
// vs 800ns). The following optimization makes parseInt on a
@ -105,7 +105,7 @@ function GlobalParseInt(string, radix) {
// Truncate number.
return string | 0;
}
radix = 0;
if (IS_UNDEFINED(radix)) radix = 0;
} else {
radix = TO_INT32(radix);
if (!(radix == 0 || (2 <= radix && radix <= 36)))

View File

@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
#define MINOR_VERSION 1
#define BUILD_NUMBER 5
#define BUILD_NUMBER 6
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false

View File

@ -190,13 +190,13 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
// -----------------------------------------------------------------------------
// Register constants.
const int Register::registerCodeByAllocationIndex[kNumAllocatableRegisters] = {
// rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
0, 3, 2, 1, 7, 8, 9, 11, 14, 12
const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = {
// rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r12
0, 3, 2, 1, 7, 8, 9, 11, 14, 12
};
const int Register::allocationIndexByRegisterCode[kNumRegisters] = {
0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
const int Register::kAllocationIndexByRegisterCode[kNumRegisters] = {
0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
};
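The two renamed tables must stay exact inverses of each other. A standalone check of that invariant, copying the same constants into local arrays:

#include <cassert>

const int kCodeByIndex[10] = { 0, 3, 2, 1, 7, 8, 9, 11, 14, 12 };
const int kIndexByCode[16] = {
  0, 3, 2, 1, -1, -1, -1, 4, 5, 6, -1, 7, 9, -1, 8, -1
};

int main() {
  for (int i = 0; i < 10; i++) {
    assert(kIndexByCode[kCodeByIndex[i]] == i);  // Round trip holds.
  }
  return 0;
}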
@ -3114,8 +3114,8 @@ void Assembler::RecordDebugBreakSlot() {
}
void Assembler::RecordComment(const char* msg) {
if (FLAG_code_comments) {
void Assembler::RecordComment(const char* msg, bool force) {
if (FLAG_code_comments || force) {
EnsureSpace ensure_space(this);
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}

View File

@ -99,12 +99,12 @@ struct Register {
static const int kNumAllocatableRegisters = 10;
static int ToAllocationIndex(Register reg) {
return allocationIndexByRegisterCode[reg.code()];
return kAllocationIndexByRegisterCode[reg.code()];
}
static Register FromAllocationIndex(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
Register result = { registerCodeByAllocationIndex[index] };
Register result = { kRegisterCodeByAllocationIndex[index] };
return result;
}
@ -155,8 +155,8 @@ struct Register {
int code_;
private:
static const int registerCodeByAllocationIndex[kNumAllocatableRegisters];
static const int allocationIndexByRegisterCode[kNumRegisters];
static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters];
static const int kAllocationIndexByRegisterCode[kNumRegisters];
};
const Register rax = { 0 };
@ -1312,7 +1312,7 @@ class Assembler : public Malloced {
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
void RecordComment(const char* msg, bool force = false);
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.

View File

@ -1336,54 +1336,33 @@ void TypeRecordingBinaryOpStub::GenerateFloatingPointCode(
void TypeRecordingBinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
GenerateRegisterArgsPush(masm);
ASSERT(op_ == Token::ADD);
NearLabel left_not_string, call_runtime;
// Registers containing left and right operands respectively.
Register lhs = rdx;
Register rhs = rax;
Register left = rdx;
Register right = rax;
// Test for string arguments before calling runtime.
Label not_strings, both_strings, not_string1, string1, string1_smi2;
// Test if left operand is a string.
__ JumpIfSmi(left, &left_not_string);
__ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &left_not_string);
StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_left_stub);
__ JumpIfNotString(lhs, r8, &not_string1);
// Left operand is not a string, test right.
__ bind(&left_not_string);
__ JumpIfSmi(right, &call_runtime);
__ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
__ j(above_equal, &call_runtime);
// First argument is a string, test second.
__ JumpIfSmi(rhs, &string1_smi2);
__ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &string1);
StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
GenerateRegisterArgsPush(masm);
__ TailCallStub(&string_add_right_stub);
// First and second argument are strings.
StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
__ TailCallStub(&string_add_stub);
__ bind(&string1_smi2);
// First argument is a string, second is a smi. Try to lookup the number
// string for the smi in the number string cache.
NumberToStringStub::GenerateLookupNumberStringCache(
masm, rhs, rbx, rcx, r8, true, &string1);
// Replace second argument on stack and tailcall string add stub to make
// the result.
__ movq(Operand(rsp, 1 * kPointerSize), rbx);
__ TailCallStub(&string_add_stub);
// Only first argument is a string.
__ bind(&string1);
__ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
// First argument was not a string, test second.
__ bind(&not_string1);
__ JumpIfNotString(rhs, rhs, &not_strings);
// Only second argument is a string.
__ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
__ bind(&not_strings);
// Neither argument is a string.
// Pop arguments, because CallRuntimeCode wants to push them again.
__ pop(rcx);
__ pop(rax);
__ pop(rdx);
__ push(rcx);
__ bind(&call_runtime);
}
@ -1440,9 +1419,11 @@ void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
ASSERT(operands_type_ == TRBinaryOpIC::STRING);
ASSERT(op_ == Token::ADD);
GenerateStringAddCode(masm);
// Try to add the arguments as strings; otherwise, transition to the
// generic TRBinaryOpIC type.
GenerateTypeTransition(masm);
}
@ -3461,6 +3442,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// is an instance of the function and anything else to
// indicate that the value is not an instance.
// None of the flags are supported on X64.
ASSERT(flags_ == kNoFlags);
// Get the object - go slow case if it's a smi.
Label slow;
__ movq(rax, Operand(rsp, 2 * kPointerSize));
@ -3536,10 +3520,11 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
Register InstanceofStub::left() { return rax; }
// Passing arguments in registers is not supported.
Register InstanceofStub::left() { return no_reg; }
Register InstanceofStub::right() { return rdx; }
Register InstanceofStub::right() { return no_reg; }
int CompareStub::MinorKey() {
@ -3798,14 +3783,15 @@ void StringCharAtGenerator::GenerateSlow(
void StringAddStub::Generate(MacroAssembler* masm) {
Label string_add_runtime;
Label string_add_runtime, call_builtin;
Builtins::JavaScript builtin_id = Builtins::ADD;
// Load the two arguments.
__ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument.
__ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument.
__ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
__ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
// Make sure that both arguments are strings if not known in advance.
if (string_check_) {
if (flags_ == NO_STRING_ADD_FLAGS) {
Condition is_smi;
is_smi = masm->CheckSmi(rax);
__ j(is_smi, &string_add_runtime);
@ -3817,6 +3803,20 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ j(is_smi, &string_add_runtime);
__ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
__ j(above_equal, &string_add_runtime);
} else {
// Here at least one of the arguments is definitely a string.
// We convert the one that is not known to be a string.
if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
&call_builtin);
builtin_id = Builtins::STRING_ADD_RIGHT;
} else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
&call_builtin);
builtin_id = Builtins::STRING_ADD_LEFT;
}
}
// Both arguments are strings.
@ -3844,14 +3844,14 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rbx: length of first string
// rcx: length of second string
// rdx: second string
// r8: map of first string if string check was performed above
// r9: map of second string if string check was performed above
// r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
// r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
Label string_add_flat_result, longer_than_two;
__ bind(&both_not_zero_length);
// If arguments were known to be strings, maps are not loaded to r8 and r9
// by the code above.
if (!string_check_) {
if (flags_ != NO_STRING_ADD_FLAGS) {
__ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
__ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
}
@ -4037,6 +4037,54 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&string_add_runtime);
__ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
__ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* slow) {
// First check if the argument is already a string.
Label not_string, done;
__ JumpIfSmi(arg, &not_string);
__ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
__ j(below, &done);
// Check the number to string cache.
Label not_cached;
__ bind(&not_string);
// Puts the cached result into scratch1.
NumberToStringStub::GenerateLookupNumberStringCache(masm,
arg,
scratch1,
scratch2,
scratch3,
false,
&not_cached);
__ movq(arg, scratch1);
__ movq(Operand(rsp, stack_offset), arg);
__ jmp(&done);
// Check if the argument is a safe string wrapper.
__ bind(&not_cached);
__ JumpIfSmi(arg, slow);
__ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
__ j(not_equal, slow);
__ testb(FieldOperand(scratch1, Map::kBitField2Offset),
Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
__ j(zero, slow);
__ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
__ movq(Operand(rsp, stack_offset), arg);
__ bind(&done);
}
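
The conversion above tries three things in order before giving up: the argument may already be a string, it may be a number with a hit in the number-to-string cache, or it may be a String wrapper object whose map is marked safe for the default valueOf behavior. A minimal standalone sketch of that decision order (plain C++ with a hypothetical Arg struct standing in for tagged values and emitted assembly, not V8 source):

// Hypothetical stand-in for a tagged argument on the stack.
struct Arg {
  bool is_string;            // already a string: nothing to do
  bool number_in_cache;      // number with a number->string cache hit
  bool safe_string_wrapper;  // JSValue marked kStringWrapperSafeForDefaultValueOf
};

// Returns true if the stub can leave a real string in the stack slot;
// false corresponds to jumping to the caller-supplied slow label.
static bool ConvertToString(Arg* arg) {
  if (arg->is_string) return true;
  if (arg->number_in_cache) {      // cached string replaces the argument
    arg->is_string = true;
    return true;
  }
  if (arg->safe_string_wrapper) {  // unwrap JSValue::kValueOffset
    arg->is_string = true;
    return true;
  }
  return false;                    // fall through to InvokeBuiltin
}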


@ -360,24 +360,35 @@ class StringHelper : public AllStatic {
// Flag that indicates how to generate code for the stub StringAddStub.
enum StringAddFlags {
NO_STRING_ADD_FLAGS = 0,
NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub.
// Omit left string check in stub (left is definitely a string).
NO_STRING_CHECK_LEFT_IN_STUB = 1 << 0,
// Omit right string check in stub (right is definitely a string).
NO_STRING_CHECK_RIGHT_IN_STUB = 1 << 1,
// Omit both string checks in stub.
NO_STRING_CHECK_IN_STUB =
NO_STRING_CHECK_LEFT_IN_STUB | NO_STRING_CHECK_RIGHT_IN_STUB
};
class StringAddStub: public CodeStub {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
}
explicit StringAddStub(StringAddFlags flags) : flags_(flags) {}
private:
Major MajorKey() { return StringAdd; }
int MinorKey() { return string_check_ ? 0 : 1; }
int MinorKey() { return flags_; }
void Generate(MacroAssembler* masm);
// Should the stub check whether arguments are strings?
bool string_check_;
void GenerateConvertArgument(MacroAssembler* masm,
int stack_offset,
Register arg,
Register scratch1,
Register scratch2,
Register scratch3,
Label* slow);
const StringAddFlags flags_;
};
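
A usage sketch (hypothetical call sites, not from the tree): since MinorKey() is now just flags_, each flag combination keys its own compiled stub instance.

StringAddStub check_both(NO_STRING_ADD_FLAGS);             // check left and right
StringAddStub left_unknown(NO_STRING_CHECK_RIGHT_IN_STUB); // right is a string; left is checked/converted
StringAddStub none_checked(NO_STRING_CHECK_IN_STUB);       // both known to be strings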


@ -7239,8 +7239,8 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (variable != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
// but "delete this" is.
ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this());
Slot* slot = variable->AsSlot();
if (variable->is_global()) {
LoadGlobal();
@ -7249,7 +7249,6 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
CALL_FUNCTION, 3);
frame_->Push(&answer);
return;
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// Call the runtime to delete from the context holding the named
@ -7260,13 +7259,11 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(variable->name());
Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2);
frame_->Push(&answer);
return;
} else {
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
frame_->Push(Factory::false_value());
}
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
frame_->Push(Factory::false_value());
} else {
// Default: Result of deleting expressions is true.
Load(node->expression()); // may have side-effects


@ -358,14 +358,16 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
fp_value, output_offset, value);
}
// The context can be gotten from the function so long as we don't
// optimize functions that need local contexts.
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
// so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
input_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function->context());
// The context for the bottommost output frame should also agree with the
// input frame.
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
if (is_bottommost) {
value = input_->GetFrameSlot(input_offset);
} else {
value = reinterpret_cast<intptr_t>(function->context());
}
output_frame->SetFrameSlot(output_offset, value);
if (is_topmost) output_frame->SetRegister(rsi.code(), value);
if (FLAG_trace_deopt) {


@ -3075,8 +3075,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
} else if (var != NULL) {
// Delete of an unqualified identifier is disallowed in strict mode
// so this code can only be reached in non-strict mode.
ASSERT(strict_mode_flag() == kNonStrictMode);
// but "delete this" is.
ASSERT(strict_mode_flag() == kNonStrictMode || var->is_this());
if (var->is_global()) {
__ push(GlobalObjectOperand());
__ Push(var->name());
@ -3114,16 +3114,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::NOT: {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
if (context()->IsTest()) ForwardBailoutToChild(expr);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
context()->Plug(if_false, if_true); // Labels swapped.
if (context()->IsEffect()) {
// Unary NOT has no side effects so it's only necessary to visit the
// subexpression. Match the optimizing compiler by not branching.
VisitForEffect(expr->expression());
} else {
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
// Notice that the labels are swapped.
context()->PrepareTest(&materialize_true, &materialize_false,
&if_false, &if_true, &fall_through);
if (context()->IsTest()) ForwardBailoutToChild(expr);
VisitForControl(expr->expression(), if_true, if_false, fall_through);
context()->Plug(if_false, if_true); // Labels swapped.
}
break;
}
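
The effect-context shortcut above relies on !e having no observable behavior of its own, so only the operand needs evaluation; in every other context the NOT is compiled by branching with the true and false labels exchanged. A standalone sketch of that dispatch (hypothetical Expr/Ctx/Label types, not the real full-codegen interfaces):

struct Expr;
struct Label;
struct Ctx { bool is_effect; };

// Stand-ins for the code generator's visitor entry points.
static void VisitForEffect(Expr*) { /* evaluate for side effects only */ }
static void VisitForControl(Expr*, Label* /*if_true*/, Label* /*if_false*/) {
  /* evaluate and branch */
}

static void VisitNot(Expr* operand, Ctx* context,
                     Label* materialize_true, Label* materialize_false) {
  if (context->is_effect) {
    VisitForEffect(operand);  // e.g. the statement `!f();` only runs f()
  } else {
    // Swapped labels: the operand's true edge is NOT's false edge.
    VisitForControl(operand, materialize_false, materialize_true);
  }
}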

File diff suppressed because it is too large


@ -90,8 +90,8 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
@ -117,6 +117,10 @@ class LCodeGen BASE_EMBEDDED {
bool is_done() const { return status_ == DONE; }
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
return info_->is_strict() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
Scope* scope() const { return scope_; }
HGraph* graph() const { return chunk_->graph(); }
@ -197,6 +201,7 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordSafepoint(LPointerMap* pointers, int deoptimization_index);
void RecordSafepoint(int deoptimization_index);
void RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
int deoptimization_index);
@ -225,6 +230,9 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp);
// Emits code for pushing a constant operand.
void EmitPushConstantOperand(LOperand* operand);
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;


@ -296,8 +296,15 @@ void LLoadContextSlot::PrintDataTo(StringStream* stream) {
}
void LStoreContextSlot::PrintDataTo(StringStream* stream) {
InputAt(0)->PrintTo(stream);
stream->Add("[%d] <- ", slot_index());
InputAt(1)->PrintTo(stream);
}
void LCallKeyed::PrintDataTo(StringStream* stream) {
stream->Add("[ecx] #%d / ", arity());
stream->Add("[rcx] #%d / ", arity());
}
@ -398,7 +405,7 @@ void LChunk::MarkEmptyBlocks() {
}
void LStoreNamed::PrintDataTo(StringStream* stream) {
void LStoreNamedField::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
@ -407,7 +414,25 @@ void LStoreNamed::PrintDataTo(StringStream* stream) {
}
void LStoreKeyed::PrintDataTo(StringStream* stream) {
void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add(".");
stream->Add(*String::cast(*name())->ToCString());
stream->Add(" <- ");
value()->PrintTo(stream);
}
void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
stream->Add("] <- ");
value()->PrintTo(stream);
}
void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
@ -1081,9 +1106,8 @@ LInstruction* LChunkBuilder::DoTest(HTest* instr) {
} else if (v->IsInstanceOf()) {
HInstanceOf* instance_of = HInstanceOf::cast(v);
LInstanceOfAndBranch* result =
new LInstanceOfAndBranch(
UseFixed(instance_of->left(), InstanceofStub::left()),
UseFixed(instance_of->right(), InstanceofStub::right()));
new LInstanceOfAndBranch(UseFixed(instance_of->left(), rax),
UseFixed(instance_of->right(), rdx));
return MarkAsCall(result, instr);
} else if (v->IsTypeofIs()) {
HTypeofIs* typeof_is = HTypeofIs::cast(v);
@ -1124,21 +1148,32 @@ LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
Abort("Unimplemented: %s", "DoInstanceOf");
return NULL;
LOperand* left = UseFixed(instr->left(), rax);
LOperand* right = UseFixed(instr->right(), rdx);
LInstanceOf* result = new LInstanceOf(left, right);
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
HInstanceOfKnownGlobal* instr) {
Abort("Unimplemented: %s", "DoInstanceOfKnownGlobal");
return NULL;
LInstanceOfKnownGlobal* result =
new LInstanceOfKnownGlobal(UseRegisterAtStart(instr->value()));
MarkAsSaveDoubles(result);
return AssignEnvironment(AssignPointerMap(DefineFixed(result, rax)));
}
LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
Abort("Unimplemented: %s", "DoApplyArguments");
return NULL;
LOperand* function = UseFixed(instr->function(), rdi);
LOperand* receiver = UseFixed(instr->receiver(), rax);
LOperand* length = UseFixed(instr->length(), rbx);
LOperand* elements = UseFixed(instr->elements(), rcx);
LApplyArguments* result = new LApplyArguments(function,
receiver,
length,
elements);
return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
}
@ -1155,8 +1190,8 @@ LInstruction* LChunkBuilder::DoContext(HContext* instr) {
LInstruction* LChunkBuilder::DoOuterContext(HOuterContext* instr) {
Abort("Unimplemented: DoOuterContext");
return NULL;
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LOuterContext(context));
}
@ -1178,14 +1213,39 @@ LInstruction* LChunkBuilder::DoCallConstantFunction(
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
Abort("Unimplemented: %s", "DoUnaryMathOperation");
return NULL;
BuiltinFunctionId op = instr->op();
if (op == kMathLog || op == kMathSin || op == kMathCos) {
LOperand* input = UseFixedDouble(instr->value(), xmm1);
LUnaryMathOperation* result = new LUnaryMathOperation(input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LUnaryMathOperation* result = new LUnaryMathOperation(input);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
case kMathFloor:
return AssignEnvironment(DefineAsRegister(result));
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
case kMathSqrt:
return DefineSameAsFirst(result);
case kMathPowHalf:
return DefineSameAsFirst(result);
default:
UNREACHABLE();
return NULL;
}
}
}
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
Abort("Unimplemented: %s", "DoCallKeyed");
return NULL;
ASSERT(instr->key()->representation().IsTagged());
LOperand* key = UseFixed(instr->key(), rcx);
argument_count_ -= instr->argument_count();
LCallKeyed* result = new LCallKeyed(key);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@ -1216,8 +1276,9 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
Abort("Unimplemented: %s", "DoCallFunction");
return NULL;
argument_count_ -= instr->argument_count();
LCallFunction* result = new LCallFunction();
return MarkAsCall(DefineFixed(result, rax), instr);
}
@ -1285,8 +1346,32 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
LInstruction* LChunkBuilder::DoMod(HMod* instr) {
Abort("Unimplemented: %s", "DoMod");
return NULL;
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
// The temporary operand is necessary to ensure that right is not allocated
// into rdx.
LOperand* temp = FixedTemp(rdx);
LOperand* value = UseFixed(instr->left(), rax);
LOperand* divisor = UseRegister(instr->right());
LModI* mod = new LModI(value, divisor, temp);
LInstruction* result = DefineFixed(mod, rdx);
return (instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
instr->CheckFlag(HValue::kCanBeDivByZero))
? AssignEnvironment(result)
: result;
} else if (instr->representation().IsTagged()) {
return DoArithmeticT(Token::MOD, instr);
} else {
ASSERT(instr->representation().IsDouble());
// We call a C function for double modulo. It can't trigger a GC.
// We need to use a fixed result register for the call.
// TODO(fschneider): Allow any register as input registers.
LOperand* left = UseFixedDouble(instr->left(), xmm1);
LOperand* right = UseFixedDouble(instr->right(), xmm2);
LArithmeticD* result = new LArithmeticD(Token::MOD, left, right);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
}
}
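
Two machine facts drive the shape of this code: x64 idiv takes its dividend in rax and leaves the remainder in rdx (hence the fixed rax input and the rdx temp that keeps the divisor out of idiv's way), and the double case is simply a call to the C library's fmod, which allocates nothing on the GC heap. The same semantics in plain C++, as an illustration only:

#include <cassert>
#include <cmath>

int IntMod(int a, int b) { return a % b; }  // lowers to idiv; remainder lands in rdx
double DoubleMod(double a, double b) { return std::fmod(a, b); }  // out-of-line call

int main() {
  assert(IntMod(7, 3) == 1);
  assert(DoubleMod(7.5, 2.0) == 1.5);  // exactly representable, so == is safe
  return 0;
}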
@ -1461,8 +1546,9 @@ LInstruction* LChunkBuilder::DoPixelArrayLength(HPixelArrayLength* instr) {
LInstruction* LChunkBuilder::DoValueOf(HValueOf* instr) {
Abort("Unimplemented: %s", "DoValueOf");
return NULL;
LOperand* object = UseRegister(instr->value());
LValueOf* result = new LValueOf(object);
return AssignEnvironment(DefineSameAsFirst(result));
}
@ -1519,12 +1605,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignPointerMap(Define(result, result_temp));
} else {
ASSERT(to.IsInteger32());
bool needs_temp = instr->CanTruncateToInt32() &&
!CpuFeatures::IsSupported(SSE3);
LOperand* value = needs_temp ?
UseTempRegister(instr->value()) : UseRegister(instr->value());
LOperand* temp = needs_temp ? TempRegister() : NULL;
return AssignEnvironment(DefineAsRegister(new LDoubleToI(value, temp)));
LOperand* value = UseRegister(instr->value());
return AssignEnvironment(DefineAsRegister(new LDoubleToI(value)));
}
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
@ -1622,14 +1704,23 @@ LInstruction* LChunkBuilder::DoStoreGlobal(HStoreGlobal* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
Abort("Unimplemented: %s", "DoLoadContextSlot");
return NULL;
LOperand* context = UseRegisterAtStart(instr->value());
return DefineAsRegister(new LLoadContextSlot(context));
}
LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
Abort("Unimplemented: DoStoreContextSlot");
return NULL;
Abort("Unimplemented: DoStoreContextSlot"); // Temporarily disabled (whesse).
LOperand* context;
LOperand* value;
if (instr->NeedsWriteBarrier()) {
context = UseTempRegister(instr->context());
value = UseTempRegister(instr->value());
} else {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
return new LStoreContextSlot(context, value);
}
@ -1692,8 +1783,11 @@ LInstruction* LChunkBuilder::DoLoadPixelArrayElement(
LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
Abort("Unimplemented: %s", "DoLoadKeyedGeneric");
return NULL;
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* key = UseFixed(instr->key(), rax);
LLoadKeyedGeneric* result = new LLoadKeyedGeneric(object, key);
return MarkAsCall(DefineFixed(result, rax), instr);
}
@ -1716,9 +1810,31 @@ LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
}
LInstruction* LChunkBuilder::DoStorePixelArrayElement(
HStorePixelArrayElement* instr) {
ASSERT(instr->value()->representation().IsInteger32());
ASSERT(instr->external_pointer()->representation().IsExternal());
ASSERT(instr->key()->representation().IsInteger32());
LOperand* external_pointer = UseRegister(instr->external_pointer());
LOperand* val = UseTempRegister(instr->value());
LOperand* key = UseRegister(instr->key());
return new LStorePixelArrayElement(external_pointer, key, val);
}
LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
Abort("Unimplemented: %s", "DoStoreKeyedGeneric");
return NULL;
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* key = UseFixed(instr->key(), rcx);
LOperand* value = UseFixed(instr->value(), rax);
ASSERT(instr->object()->representation().IsTagged());
ASSERT(instr->key()->representation().IsTagged());
ASSERT(instr->value()->representation().IsTagged());
LStoreKeyedGeneric* result = new LStoreKeyedGeneric(object, key, value);
return MarkAsCall(result, instr);
}
@ -1743,14 +1859,19 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
Abort("Unimplemented: %s", "DoStoreNamedGeneric");
return NULL;
LOperand* object = UseFixed(instr->object(), rdx);
LOperand* value = UseFixed(instr->value(), rax);
LStoreNamedGeneric* result = new LStoreNamedGeneric(object, value);
return MarkAsCall(result, instr);
}
LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
Abort("Unimplemented: %s", "DoStringCharCodeAt");
return NULL;
LOperand* string = UseRegister(instr->string());
LOperand* index = UseRegisterOrConstant(instr->index());
LStringCharCodeAt* result = new LStringCharCodeAt(string, index);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
@ -1771,8 +1892,7 @@ LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
Abort("Unimplemented: %s", "DoRegExpLiteral");
return NULL;
return MarkAsCall(DefineFixed(new LRegExpLiteral, rax), instr);
}
@ -1782,14 +1902,16 @@ LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
LInstruction* LChunkBuilder::DoDeleteProperty(HDeleteProperty* instr) {
Abort("Unimplemented: %s", "DoDeleteProperty");
return NULL;
LDeleteProperty* result =
new LDeleteProperty(Use(instr->object()), UseOrConstant(instr->key()));
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
Abort("Unimplemented: %s", "DoOsrEntry");
return NULL;
allocator_->MarkAsOsrEntry();
current_block_->last_environment()->set_ast_id(instr->ast_id());
return AssignEnvironment(new LOsrEntry);
}
@ -1800,8 +1922,8 @@ LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
Abort("Unimplemented: %s", "DoUnknownOSRValue");
return NULL;
int spill_index = chunk()->GetNextSpillIndex(false); // Not double-width.
return DefineAsSpilled(new LUnknownOSRValue, spill_index);
}
@ -1812,7 +1934,10 @@ LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
Abort("Unimplemented: %s", "DoArgumentsObject");
// There are no real uses of the arguments object.
// arguments.length and element access are supported directly on
// stack arguments, and any real arguments object use causes a bailout.
// So this value is never used.
return NULL;
}
@ -1827,14 +1952,13 @@ LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
Abort("Unimplemented: %s", "DoTypeof");
return NULL;
LTypeof* result = new LTypeof(UseAtStart(instr->value()));
return MarkAsCall(DefineFixed(result, rax), instr);
}
LInstruction* LChunkBuilder::DoTypeofIs(HTypeofIs* instr) {
Abort("Unimplemented: %s", "DoTypeofIs");
return NULL;
return DefineSameAsFirst(new LTypeofIs(UseRegister(instr->value())));
}


@ -42,8 +42,6 @@ class LCodeGen;
#define LITHIUM_ALL_INSTRUCTION_LIST(V) \
V(ControlInstruction) \
V(Call) \
V(StoreKeyed) \
V(StoreNamed) \
LITHIUM_CONCRETE_INSTRUCTION_LIST(V)
@ -131,6 +129,7 @@ class LCodeGen;
V(NumberUntagD) \
V(ObjectLiteral) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
V(Power) \
@ -141,11 +140,14 @@ class LCodeGen;
V(SmiTag) \
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StorePixelArrayElement) \
V(StringCharCodeAt) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
@ -830,11 +832,10 @@ class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
};
class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 0> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
explicit LInstanceOfKnownGlobal(LOperand* value) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
@ -1005,11 +1006,10 @@ class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
};
class LValueOf: public LTemplateInstruction<1, 1, 1> {
class LValueOf: public LTemplateInstruction<1, 1, 0> {
public:
LValueOf(LOperand* value, LOperand* temp) {
explicit LValueOf(LOperand* value) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(ValueOf, "value-of")
@ -1246,6 +1246,25 @@ class LLoadContextSlot: public LTemplateInstruction<1, 1, 0> {
};
class LStoreContextSlot: public LTemplateInstruction<0, 2, 0> {
public:
LStoreContextSlot(LOperand* context, LOperand* value) {
inputs_[0] = context;
inputs_[1] = value;
}
DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
LOperand* context() { return InputAt(0); }
LOperand* value() { return InputAt(1); }
int slot_index() { return hydrogen()->slot_index(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
virtual void PrintDataTo(StringStream* stream);
};
class LPushArgument: public LTemplateInstruction<0, 1, 0> {
public:
explicit LPushArgument(LOperand* value) {
@ -1262,6 +1281,18 @@ class LContext: public LTemplateInstruction<1, 0, 0> {
};
class LOuterContext: public LTemplateInstruction<1, 1, 0> {
public:
explicit LOuterContext(LOperand* context) {
inputs_[0] = context;
}
DECLARE_CONCRETE_INSTRUCTION(OuterContext, "outer-context")
LOperand* context() { return InputAt(0); }
};
class LGlobalObject: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(GlobalObject, "global-object")
@ -1295,6 +1326,8 @@ class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallKeyed, "call-keyed")
DECLARE_HYDROGEN_ACCESSOR(CallKeyed)
LOperand* key() { return inputs_[0]; }
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
@ -1315,6 +1348,8 @@ class LCallNamed: public LTemplateInstruction<1, 0, 0> {
class LCallFunction: public LTemplateInstruction<1, 0, 0> {
public:
LCallFunction() {}
DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
DECLARE_HYDROGEN_ACCESSOR(CallFunction)
@ -1403,11 +1438,10 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> {
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
class LDoubleToI: public LTemplateInstruction<1, 1, 0> {
public:
LDoubleToI(LOperand* value, LOperand* temp) {
explicit LDoubleToI(LOperand* value) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
@ -1468,15 +1502,39 @@ class LSmiUntag: public LTemplateInstruction<1, 1, 0> {
};
class LStoreNamed: public LTemplateInstruction<0, 2, 1> {
class LStoreNamedField: public LTemplateInstruction<0, 2, 1> {
public:
LStoreNamed(LOperand* obj, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = val;
LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
inputs_[0] = object;
inputs_[1] = value;
temps_[0] = temp;
}
DECLARE_INSTRUCTION(StoreNamed)
DECLARE_HYDROGEN_ACCESSOR(StoreNamed)
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
public:
LStoreNamedGeneric(LOperand* object, LOperand* value) {
inputs_[0] = object;
inputs_[1] = value;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
virtual void PrintDataTo(StringStream* stream);
@ -1486,42 +1544,17 @@ class LStoreNamed: public LTemplateInstruction<0, 2, 1> {
};
class LStoreNamedField: public LStoreNamed {
class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreNamedField(LOperand* obj, LOperand* val, LOperand* temp)
: LStoreNamed(obj, val) {
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
bool is_in_object() { return hydrogen()->is_in_object(); }
int offset() { return hydrogen()->offset(); }
bool needs_write_barrier() { return hydrogen()->NeedsWriteBarrier(); }
Handle<Map> transition() const { return hydrogen()->transition(); }
};
class LStoreNamedGeneric: public LStoreNamed {
public:
LStoreNamedGeneric(LOperand* obj, LOperand* val)
: LStoreNamed(obj, val) { }
DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
};
class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
DECLARE_INSTRUCTION(StoreKeyed)
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
"store-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
virtual void PrintDataTo(StringStream* stream);
@ -1531,23 +1564,56 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
};
class LStoreKeyedFastElement: public LStoreKeyed {
class LStorePixelArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val)
: LStoreKeyed(obj, key, val) {}
LStorePixelArrayElement(LOperand* external_pointer,
LOperand* key,
LOperand* val) {
inputs_[0] = external_pointer;
inputs_[1] = key;
inputs_[2] = val;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
"store-keyed-fast-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
DECLARE_CONCRETE_INSTRUCTION(StorePixelArrayElement,
"store-pixel-array-element")
DECLARE_HYDROGEN_ACCESSOR(StorePixelArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
};
class LStoreKeyedGeneric: public LStoreKeyed {
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* obj, LOperand* key, LOperand* val)
: LStoreKeyed(obj, key, val) { }
LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
};
class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
inputs_[0] = string;
inputs_[1] = index;
}
DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
LOperand* string() { return inputs_[0]; }
LOperand* index() { return inputs_[1]; }
};


@ -623,7 +623,9 @@ MaybeObject* MacroAssembler::TryJumpToExternalReference(
}
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
// Calls are not allowed in some stubs.
ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
@ -632,7 +634,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag) {
// parameter count to avoid emitting code to do the check.
ParameterCount expected(0);
GetBuiltinEntry(rdx, id);
InvokeCode(rdx, expected, expected, flag);
InvokeCode(rdx, expected, expected, flag, post_call_generator);
}
@ -1444,15 +1446,17 @@ void MacroAssembler::Pushad() {
// r15 is kSmiConstantRegister
STATIC_ASSERT(11 == kNumSafepointSavedRegisters);
// Use lea for symmetry with Popad.
lea(rsp, Operand(rsp,
-(kNumSafepointRegisters-kNumSafepointSavedRegisters) * kPointerSize));
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
lea(rsp, Operand(rsp, -sp_delta));
}
void MacroAssembler::Popad() {
// Popad must not change the flags, so use lea instead of addq.
lea(rsp, Operand(rsp,
(kNumSafepointRegisters-kNumSafepointSavedRegisters) * kPointerSize));
int sp_delta =
(kNumSafepointRegisters - kNumSafepointSavedRegisters) * kPointerSize;
lea(rsp, Operand(rsp, sp_delta));
pop(r14);
pop(r12);
pop(r11);
@ -1494,6 +1498,16 @@ int MacroAssembler::kSafepointPushRegisterIndices[Register::kNumRegisters] = {
};
void MacroAssembler::StoreToSafepointRegisterSlot(Register dst, Register src) {
movq(SafepointRegisterSlot(dst), src);
}
Operand MacroAssembler::SafepointRegisterSlot(Register reg) {
return Operand(rsp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
}
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
// Adjust this code if not the case.
@ -1835,11 +1849,19 @@ void MacroAssembler::DebugBreak() {
void MacroAssembler::InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag) {
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
NearLabel done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag);
InvokePrologue(expected,
actual,
Handle<Code>::null(),
code,
&done,
flag,
post_call_generator);
if (flag == CALL_FUNCTION) {
call(code);
if (post_call_generator != NULL) post_call_generator->Generate();
} else {
ASSERT(flag == JUMP_FUNCTION);
jmp(code);
@ -1852,12 +1874,20 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag) {
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
NearLabel done;
Register dummy = rax;
InvokePrologue(expected, actual, code, dummy, &done, flag);
InvokePrologue(expected,
actual,
code,
dummy,
&done,
flag,
post_call_generator);
if (flag == CALL_FUNCTION) {
Call(code, rmode);
if (post_call_generator != NULL) post_call_generator->Generate();
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code, rmode);
@ -1868,7 +1898,8 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
void MacroAssembler::InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag) {
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
ASSERT(function.is(rdi));
movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@ -1879,13 +1910,14 @@ void MacroAssembler::InvokeFunction(Register function,
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(rbx);
InvokeCode(rdx, expected, actual, flag);
InvokeCode(rdx, expected, actual, flag, post_call_generator);
}
void MacroAssembler::InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag) {
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
ASSERT(function->is_compiled());
// Get the function and setup the context.
Move(rdi, Handle<JSFunction>(function));
@ -1896,12 +1928,17 @@ void MacroAssembler::InvokeFunction(JSFunction* function,
// the Code object every time we call the function.
movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
InvokeCode(rdx, expected, actual, flag);
InvokeCode(rdx, expected, actual, flag, post_call_generator);
} else {
// Invoke the cached code.
Handle<Code> code(function->code());
ParameterCount expected(function->shared()->formal_parameter_count());
InvokeCode(code, expected, actual, RelocInfo::CODE_TARGET, flag);
InvokeCode(code,
expected,
actual,
RelocInfo::CODE_TARGET,
flag,
post_call_generator);
}
}


@ -58,6 +58,7 @@ typedef Operand MemOperand;
// Forward declaration.
class JumpTarget;
class PostCallGenerator;
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
@ -170,10 +171,9 @@ class MacroAssembler: public Assembler {
// Push and pop the registers that can hold pointers.
void PushSafepointRegisters() { Pushad(); }
void PopSafepointRegisters() { Popad(); }
static int SafepointRegisterStackIndex(int reg_code) {
return kNumSafepointRegisters - 1 -
kSafepointPushRegisterIndices[reg_code];
}
// Store the value in register src in the safepoint register stack
// slot for register dst.
void StoreToSafepointRegisterSlot(Register dst, Register src);
// ---------------------------------------------------------------------------
@ -183,27 +183,33 @@ class MacroAssembler: public Assembler {
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag);
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag);
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag);
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag);
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
void InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@ -996,7 +1002,8 @@ class MacroAssembler: public Assembler {
Handle<Code> code_constant,
Register code_register,
LabelType* done,
InvokeFlag flag);
InvokeFlag flag,
PostCallGenerator* post_call_generator);
// Activation support.
void EnterFrame(StackFrame::Type type);
@ -1027,6 +1034,17 @@ class MacroAssembler: public Assembler {
Object* PopHandleScopeHelper(Register saved,
Register scratch,
bool gc_allowed);
// Compute memory operands for safepoint stack slots.
Operand SafepointRegisterSlot(Register reg);
static int SafepointRegisterStackIndex(int reg_code) {
return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1;
}
// Needs access to SafepointRegisterStackIndex for optimized frame
// traversal.
friend class OptimizedFrame;
};
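
A worked example of the slot formula with hypothetical numbers (the real kSafepointPushRegisterIndices table lives in macro-assembler-x64.cc): a register pushed with index 3 out of, say, 16 safepoint registers lands in slot 16 - 3 - 1 = 12, i.e. slots are numbered from the opposite end of the pushed block.

#include <cassert>

// Mirrors only the formula in the header above; the numbers are made up.
static int SafepointRegisterStackIndex(int push_index, int num_regs) {
  return num_regs - push_index - 1;
}

int main() {
  assert(SafepointRegisterStackIndex(3, 16) == 12);
  assert(SafepointRegisterStackIndex(0, 16) == 15);  // first pushed, deepest slot
  return 0;
}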
@ -1050,6 +1068,17 @@ class CodePatcher {
};
// Helper class for generating code or data associated with the code
// right after a call instruction. As an example this can be used to
// generate safepoint data after calls for crankshaft.
class PostCallGenerator {
public:
PostCallGenerator() { }
virtual ~PostCallGenerator() { }
virtual void Generate() = 0;
};
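
One way such a hook might look in practice, as a sketch only (SafepointGenerator and RecordSafepointAt are hypothetical names here, not declarations from this header):

// Hypothetical recorder; in real use this would emit safepoint table data.
inline void RecordSafepointAt(int deoptimization_index) { /* record */ }

class SafepointGenerator : public PostCallGenerator {
 public:
  explicit SafepointGenerator(int deoptimization_index)
      : deoptimization_index_(deoptimization_index) { }
  virtual ~SafepointGenerator() { }

  // Runs immediately after the call instruction has been emitted.
  virtual void Generate() {
    RecordSafepointAt(deoptimization_index_);
  }

 private:
  int deoptimization_index_;
};

// e.g. masm->InvokeFunction(rdi, actual, CALL_FUNCTION, &safepoint_generator);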
// -----------------------------------------------------------------------------
// Static helper functions.
@ -1756,7 +1785,8 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> code_constant,
Register code_register,
LabelType* done,
InvokeFlag flag) {
InvokeFlag flag,
PostCallGenerator* post_call_generator) {
bool definitely_matches = false;
NearLabel invoke;
if (expected.is_immediate()) {
@ -1807,6 +1837,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
if (flag == CALL_FUNCTION) {
Call(adaptor, RelocInfo::CODE_TARGET);
if (post_call_generator != NULL) post_call_generator->Generate();
jmp(done);
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);


@ -2060,8 +2060,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case STRING_CHECK:
if (!function->IsBuiltin()) {
// Calling non-builtins with a value as receiver requires boxing.
if (!function->IsBuiltin() && !function_info->strict_mode()) {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
} else {
// Check that the object is a two-byte string or a symbol.
@ -2076,8 +2077,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
break;
case NUMBER_CHECK: {
if (!function->IsBuiltin()) {
// Calling non-builtins with a value as receiver requires boxing.
if (!function->IsBuiltin() && !function_info->strict_mode()) {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
} else {
Label fast;
@ -2096,8 +2098,9 @@ MaybeObject* CallStubCompiler::CompileCallConstant(Object* object,
}
case BOOLEAN_CHECK: {
if (!function->IsBuiltin()) {
// Calling non-builtins with a value as receiver requires boxing.
if (!function->IsBuiltin() && !function_info->strict_mode()) {
// Calling non-strict non-builtins with a value as the receiver
// requires boxing.
__ jmp(&miss);
} else {
Label fast;


@ -46,30 +46,9 @@ test-heap-profiler/HeapSnapshotsDiff: PASS || FAIL
test-serialize/TestThatAlwaysFails: FAIL
test-serialize/DependentTestThatAlwaysFails: FAIL
##############################################################################
[ $arch == x64 ]
# Optimization is currently not working on crankshaft x64 and ARM.
test-heap/TestInternalWeakLists: PASS || FAIL
test-heap/TestInternalWeakListsTraverseWithGC: PASS || FAIL
##############################################################################
[ $arch == x64 && $crankshaft ]
# Tests that fail with crankshaft.
test-deoptimization/DeoptimizeBinaryOperationMOD: FAIL
test-deoptimization/DeoptimizeLoadICStoreIC: FAIL
test-deoptimization/DeoptimizeLoadICStoreICNested: FAIL
test-deoptimization/DeoptimizeCompare: PASS || FAIL
##############################################################################
[ $arch == arm ]
# Optimization is currently not working on crankshaft x64 and ARM.
test-heap/TestInternalWeakLists: PASS || FAIL
test-heap/TestInternalWeakListsTraverseWithGC: PASS || FAIL
# We cannot assume that we can throw OutOfMemory exceptions in all situations.
# Apparently our ARM box is in such a state. Skip the test as it also runs for
# a long time.


@ -5652,8 +5652,7 @@ TEST(AccessControl) {
}
// This is a regression test for issue 1154.
TEST(AccessControlObjectKeys) {
TEST(AccessControlES5) {
v8::HandleScope handle_scope;
v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
@ -5677,7 +5676,29 @@ TEST(AccessControlObjectKeys) {
v8::Handle<v8::Object> global1 = context1->Global();
global1->Set(v8_str("other"), global0);
// Regression test for issue 1154.
ExpectTrue("Object.keys(other).indexOf('blocked_prop') == -1");
ExpectUndefined("other.blocked_prop");
// Regression test for issue 1027.
CompileRun("Object.defineProperty(\n"
" other, 'blocked_prop', {configurable: false})");
ExpectUndefined("other.blocked_prop");
ExpectUndefined(
"Object.getOwnPropertyDescriptor(other, 'blocked_prop')");
// Regression test for issue 1171.
ExpectTrue("Object.isExtensible(other)");
CompileRun("Object.preventExtensions(other)");
ExpectTrue("Object.isExtensible(other)");
// Object.seal and Object.freeze.
CompileRun("Object.freeze(other)");
ExpectTrue("Object.isExtensible(other)");
CompileRun("Object.seal(other)");
ExpectTrue("Object.isExtensible(other)");
}
@ -10825,6 +10846,24 @@ THREADED_TEST(PixelArray) {
"result");
CHECK_EQ(32640, result->Int32Value());
// Make sure that pixel array stores are optimized by crankshaft.
result = CompileRun("function pa_init(p) {"
"for (var i = 0; i < 256; ++i) { p[i] = i; }"
"}"
"function pa_load(p) {"
" var sum = 0;"
" for (var i=0; i<256; ++i) {"
" sum += p[i];"
" }"
" return sum; "
"}"
"for (var i = 0; i < 100000; ++i) {"
" pa_init(pixels);"
"}"
"result = pa_load(pixels);"
"result");
CHECK_EQ(32640, result->Int32Value());
free(pixel_data);
}
@ -10844,6 +10883,53 @@ THREADED_TEST(PixelArrayInfo) {
}
static v8::Handle<Value> NotHandledIndexedPropertyGetter(
uint32_t index,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
return v8::Handle<Value>();
}
static v8::Handle<Value> NotHandledIndexedPropertySetter(
uint32_t index,
Local<Value> value,
const AccessorInfo& info) {
ApiTestFuzzer::Fuzz();
return v8::Handle<Value>();
}
THREADED_TEST(PixelArrayWithInterceptor) {
v8::HandleScope scope;
LocalContext context;
const int kElementCount = 260;
uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
i::Handle<i::PixelArray> pixels =
i::Factory::NewPixelArray(kElementCount, pixel_data);
for (int i = 0; i < kElementCount; i++) {
pixels->set(i, i % 256);
}
v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
templ->SetIndexedPropertyHandler(NotHandledIndexedPropertyGetter,
NotHandledIndexedPropertySetter);
v8::Handle<v8::Object> obj = templ->NewInstance();
obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount);
context->Global()->Set(v8_str("pixels"), obj);
v8::Handle<v8::Value> result = CompileRun("pixels[1]");
CHECK_EQ(1, result->Int32Value());
result = CompileRun("var sum = 0;"
"for (var i = 0; i < 8; i++) {"
" sum += pixels[i] = pixels[i] = -i;"
"}"
"sum;");
CHECK_EQ(-28, result->Int32Value());
result = CompileRun("pixels.hasOwnProperty('1')");
CHECK(result->BooleanValue());
free(pixel_data);
}
static int ExternalArrayElementSize(v8::ExternalArrayType array_type) {
switch (array_type) {
case v8::kExternalByteArray:


@ -50,7 +50,7 @@ static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
i::Address frame3 = NULL) {
i::TickSample* sample = proc->TickSampleEvent();
sample->pc = frame1;
sample->function = frame1;
sample->tos = frame1;
sample->frames_count = 0;
if (frame2 != NULL) {
sample->stack[0] = frame2;
@ -103,7 +103,8 @@ TEST(CodeEvents) {
i::Heap::empty_string(),
0,
ToAddress(0x1000),
0x100);
0x100,
ToAddress(0x10000));
processor.CodeCreateEvent(i::Logger::BUILTIN_TAG,
"bbb",
ToAddress(0x1200),
@ -116,8 +117,6 @@ TEST(CodeEvents) {
processor.CodeMoveEvent(ToAddress(0x1400), ToAddress(0x1500));
processor.CodeCreateEvent(i::Logger::STUB_TAG, 3, ToAddress(0x1600), 0x10);
processor.CodeDeleteEvent(ToAddress(0x1600));
processor.FunctionCreateEvent(ToAddress(0x1700), ToAddress(0x1000),
TokenEnumerator::kNoSecurityToken);
// Enqueue a tick event to enable code events processing.
EnqueueTickSampleEvent(&processor, ToAddress(0x1000));
@ -139,9 +138,6 @@ TEST(CodeEvents) {
CHECK_NE(NULL, entry4);
CHECK_EQ("ddd", entry4->name());
CHECK_EQ(NULL, generator.code_map()->FindEntry(ToAddress(0x1600)));
CodeEntry* entry5 = generator.code_map()->FindEntry(ToAddress(0x1700));
CHECK_NE(NULL, entry5);
CHECK_EQ(aaa_str, entry5->name());
}


@ -223,7 +223,7 @@ TEST(Unknown) {
{ DeclarationContext context;
context.Check("function x() { }; x",
1, // access
1, // declaration
0,
0,
EXPECT_RESULT);
}
@ -278,7 +278,7 @@ TEST(Present) {
{ PresentPropertyContext context;
context.Check("function x() { }; x",
1, // access
1, // declaration
0,
0,
EXPECT_RESULT);
}
@ -332,7 +332,7 @@ TEST(Absent) {
{ AbsentPropertyContext context;
context.Check("function x() { }; x",
1, // access
1, // declaration
0,
0,
EXPECT_RESULT);
}
@ -422,7 +422,7 @@ TEST(Appearing) {
{ AppearingPropertyContext context;
context.Check("function x() { }; x",
1, // access
1, // declaration
0,
0,
EXPECT_RESULT);
}


@ -33,6 +33,7 @@
#include "v8.h"
#include "api.h"
#include "codegen.h"
#include "log.h"
#include "top.h"
@ -200,16 +201,16 @@ static void InitializeVM() {
}
static void CheckJSFunctionAtAddress(const char* func_name, Address addr) {
CHECK(i::Heap::Contains(addr));
i::Object* obj = i::HeapObject::FromAddress(addr);
CHECK(obj->IsJSFunction());
CHECK(JSFunction::cast(obj)->shared()->name()->IsString());
i::SmartPointer<char> found_name =
i::String::cast(
JSFunction::cast(
obj)->shared()->name())->ToCString();
CHECK_EQ(func_name, *found_name);
static bool IsAddressWithinFuncCode(JSFunction* function, Address addr) {
i::Code* code = function->code();
return code->contains(addr);
}
static bool IsAddressWithinFuncCode(const char* func_name, Address addr) {
v8::Local<v8::Value> func = env->Global()->Get(v8_str(func_name));
CHECK(func->IsFunction());
JSFunction* js_func = JSFunction::cast(*v8::Utils::OpenHandle(*func));
return IsAddressWithinFuncCode(js_func, addr);
}
@ -309,8 +310,8 @@ TEST(CFromJSStackTrace) {
// Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
CHECK_GT(sample.frames_count, base + 1);
CheckJSFunctionAtAddress("JSFuncDoTrace", sample.stack[base + 0]);
CheckJSFunctionAtAddress("JSTrace", sample.stack[base + 1]);
CHECK(IsAddressWithinFuncCode("JSFuncDoTrace", sample.stack[base + 0]));
CHECK(IsAddressWithinFuncCode("JSTrace", sample.stack[base + 1]));
}
@ -351,9 +352,6 @@ TEST(PureJSStackTrace) {
// DoTraceHideCEntryFPAddress(EBP) [native]
// StackTracer::Trace
//
// The last JS function called. It is only visible through
// sample.function, as its return address is above captured EBP value.
CheckJSFunctionAtAddress("JSFuncDoTrace", sample.function);
// The VM state tracking keeps track of external callbacks and puts
// them at the top of the sample stack.
@ -363,8 +361,8 @@ TEST(PureJSStackTrace) {
// Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
CHECK_GT(sample.frames_count, base + 1);
CheckJSFunctionAtAddress("JSTrace", sample.stack[base + 0]);
CheckJSFunctionAtAddress("OuterJSTrace", sample.stack[base + 1]);
CHECK(IsAddressWithinFuncCode("JSTrace", sample.stack[base + 0]));
CHECK(IsAddressWithinFuncCode("OuterJSTrace", sample.stack[base + 1]));
}


@ -1053,10 +1053,10 @@ static bool AreFuncNamesEqual(CodeEntityInfo ref_s, CodeEntityInfo new_s) {
// Skip size.
ref_s = strchr(ref_s, ',') + 1;
new_s = strchr(new_s, ',') + 1;
int ref_len = StrChrLen(ref_s, '\n');
int new_len = StrChrLen(new_s, '\n');
// If reference is anonymous (""), it's OK to have anything in new.
if (ref_len == 2) return true;
CHECK_EQ('"', ref_s[0]);
CHECK_EQ('"', new_s[0]);
int ref_len = StrChrLen(ref_s + 1, '\"');
int new_len = StrChrLen(new_s + 1, '\"');
// A special case for ErrorPrototype. Haven't yet figured out why they
// are different.
const char* error_prototype = "\"ErrorPrototype";
@ -1074,21 +1074,6 @@ static bool AreFuncNamesEqual(CodeEntityInfo ref_s, CodeEntityInfo new_s) {
return true;
}
}
// Code objects can change their optimizability: code object may start
// as optimizable, but later be discovered to be actually not optimizable.
// Alas, we don't record this info as of now, so we allow cases when
// ref is thought to be optimizable while traverse finds it to be
// not optimizable.
if (ref_s[1] == '~') { // Code object used to be optimizable
if (new_s[1] == ' ') { // ...but later was set unoptimizable.
CHECK_EQ('"', ref_s[0]);
CHECK_EQ('"', new_s[0]);
ref_s += 2; // Cut the leading quote and the marker
ref_len -= 2;
new_s += 1; // Cut the leading quote only.
new_len -= 1;
}
}
return ref_len == new_len && strncmp(ref_s, new_s, ref_len) == 0;
}


@ -321,14 +321,17 @@ TEST(Regress928) {
data->Initialize();
int first_function = strstr(program, "function") - program;
int first_lbrace = first_function + strlen("function () ");
int first_function =
static_cast<int>(strstr(program, "function") - program);
int first_lbrace = first_function + static_cast<int>(strlen("function () "));
CHECK_EQ('{', program[first_lbrace]);
i::FunctionEntry entry1 = data->GetFunctionEntry(first_lbrace);
CHECK(!entry1.is_valid());
int second_function = strstr(program + first_lbrace, "function") - program;
int second_lbrace = second_function + strlen("function () ");
int second_function =
static_cast<int>(strstr(program + first_lbrace, "function") - program);
int second_lbrace =
second_function + static_cast<int>(strlen("function () "));
CHECK_EQ('{', program[second_lbrace]);
i::FunctionEntry entry2 = data->GetFunctionEntry(second_lbrace);
CHECK(entry2.is_valid());


@ -600,13 +600,13 @@ TEST(RecordTickSample) {
// -> ccc -> aaa - sample3
TickSample sample1;
sample1.pc = ToAddress(0x1600);
sample1.function = ToAddress(0x1500);
sample1.tos = ToAddress(0x1500);
sample1.stack[0] = ToAddress(0x1510);
sample1.frames_count = 1;
generator.RecordTickSample(sample1);
TickSample sample2;
sample2.pc = ToAddress(0x1925);
sample2.function = ToAddress(0x1900);
sample2.tos = ToAddress(0x1900);
sample2.stack[0] = ToAddress(0x1780);
sample2.stack[1] = ToAddress(0x10000); // non-existent.
sample2.stack[2] = ToAddress(0x1620);
@ -614,7 +614,7 @@ TEST(RecordTickSample) {
generator.RecordTickSample(sample2);
TickSample sample3;
sample3.pc = ToAddress(0x1510);
sample3.function = ToAddress(0x1500);
sample3.tos = ToAddress(0x1500);
sample3.stack[0] = ToAddress(0x1910);
sample3.stack[1] = ToAddress(0x1610);
sample3.frames_count = 2;

Some files were not shown because too many files have changed in this diff.