Upgrade V8 to 2.2.6

commit 3b75f5070d, parent 8f79169aef
deps/v8/ChangeLog (vendored, 15 lines changed)

@@ -1,3 +1,14 @@
+2010-04-28: Version 2.2.6
+
+        Add "amd64" as recognized architecture in scons build script
+        (by Ryan Dahl <coldredlemur@gmail.com>).
+
+        Fix bug in String search and replace with very simple RegExps.
+
+        Fix bug in RegExp containing "\b^".
+
+        Performance improvements on all platforms.
+
 2010-04-26: Version 2.2.5
 
         Various performance improvements (especially for ARM and x64)
@@ -5,8 +16,8 @@
         Fixed bug in CPU profiling (http://crbug.com/42137)
 
         Fixed a bug with the natives cache.
 
         Fixed two bugs in the ARM code generator that can cause
        wrong calculations.
 
         Fixed a bug that may cause a wrong result for shift operations.
deps/v8/include/v8.h (vendored, 2 lines changed)

@@ -767,7 +767,7 @@ class V8EXPORT Value : public Data {
   bool IsInt32() const;
 
   /**
-   * Returns true if this value is a 32-bit signed integer.
+   * Returns true if this value is a 32-bit unsigned integer.
    */
   bool IsUint32() const;
 
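The one-word change above fixes a copy-pasted doc comment: IsUint32() tests for a 32-bit unsigned integer, a different predicate from IsInt32(). A minimal standalone sketch of the two documented semantics (hypothetical helpers, not the V8 API):

#include <cmath>

// Hypothetical helpers mirroring the documented predicates for a numeric
// value d: negative values can satisfy only the signed test, and values
// above INT32_MAX only the unsigned one.
bool FitsInt32(double d) {
  return d >= -2147483648.0 && d <= 2147483647.0 && d == std::floor(d);
}
bool FitsUint32(double d) {
  return d >= 0.0 && d <= 4294967295.0 && d == std::floor(d);
}
// FitsInt32(-1.0) == true,  FitsUint32(-1.0) == false
// FitsInt32(3e9)  == false, FitsUint32(3e9)  == true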
deps/v8/src/api.cc (vendored, 4 lines changed)

@@ -4020,8 +4020,8 @@ void Debug::ProcessDebugMessages() {
 }
 
 Local<Context> Debug::GetDebugContext() {
-  i::EnterDebugger debugger;
-  return Utils::ToLocal(i::Debug::debug_context());
+  ENTER_V8;
+  return Utils::ToLocal(i::Debugger::GetDebugContext());
 }
 
 #endif  // ENABLE_DEBUGGER_SUPPORT
deps/v8/src/arm/assembler-arm.cc (vendored, 8 lines changed)

@@ -800,9 +800,10 @@ void Assembler::b(int branch_offset, Condition cond) {
   ASSERT(is_int24(imm24));
   emit(cond | B27 | B25 | (imm24 & Imm24Mask));
 
-  if (cond == al)
+  if (cond == al) {
     // Dead code is a good location to emit the constant pool.
     CheckConstPool(false, false);
+  }
 }
 
 
@@ -1784,6 +1785,11 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
 }
 
 
+void Assembler::BlockConstPoolFor(int instructions) {
+  BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
 // Debugging.
 void Assembler::RecordJSReturn() {
   WriteRecordedPositions();
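Background for the new helper: ARM code keeps large constants in a pool emitted near the code and loaded PC-relative, and CheckConstPool dumps that pool at safe points. BlockConstPoolFor(instructions) pushes the earliest legal dump position past the next few instructions so a pool can never land inside a sequence that must stay contiguous. A minimal model of the bookkeeping, assuming only that every ARM instruction is kInstrSize == 4 bytes (a sketch, not V8 code):

#include <cstdint>

struct MiniAssembler {
  static const int kInstrSize = 4;       // every ARM instruction is 4 bytes
  int pc_offset_ = 0;                    // bytes emitted so far
  int no_const_pool_before_ = 0;         // no pool may be dumped before this
  int const_pool_blocked_nesting_ = 0;   // depth of Start/End pairs

  void Emit(uint32_t /*instr*/) { pc_offset_ += kInstrSize; }
  void BlockConstPoolBefore(int pc_offset) {
    if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
  }
  // The new helper: postpone the pool for the next n instructions.
  void BlockConstPoolFor(int instructions) {
    BlockConstPoolBefore(pc_offset_ + instructions * kInstrSize);
  }
  void StartBlockConstPool() { const_pool_blocked_nesting_++; }
  void EndBlockConstPool() { const_pool_blocked_nesting_--; }
  bool ConstPoolAllowed() const {
    return const_pool_blocked_nesting_ == 0 &&
           pc_offset_ >= no_const_pool_before_;
  }
};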
deps/v8/src/arm/assembler-arm.h (vendored, 20 lines changed)

@@ -941,6 +941,10 @@ class Assembler : public Malloced {
     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
   };
 
+  // Postpone the generation of the constant pool for the specified number of
+  // instructions.
+  void BlockConstPoolFor(int instructions);
+
   // Debugging
 
   // Mark address of the ExitJSFrame code.
@@ -956,14 +960,7 @@ class Assembler : public Malloced {
 
   int pc_offset() const { return pc_ - buffer_; }
   int current_position() const { return current_position_; }
-  int current_statement_position() const { return current_position_; }
+  int current_statement_position() const { return current_statement_position_; }
 
-  void StartBlockConstPool() {
-    const_pool_blocked_nesting_++;
-  }
-  void EndBlockConstPool() {
-    const_pool_blocked_nesting_--;
-  }
-
   // Read/patch instructions
   static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
@@ -1001,6 +998,13 @@ class Assembler : public Malloced {
     if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
  }
 
+  void StartBlockConstPool() {
+    const_pool_blocked_nesting_++;
+  }
+  void EndBlockConstPool() {
+    const_pool_blocked_nesting_--;
+  }
+
  private:
   // Code buffer:
   // The buffer into which code and relocation info are generated.
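Note that this hunk also fixes a real accessor bug: current_statement_position() used to return current_position_. The Start/EndBlockConstPool pair is only moved, placed next to the BlockConstPoolScope whose DISALLOW_IMPLICIT_CONSTRUCTORS line is visible above; the counter is a nesting depth so scopes compose. A self-contained sketch of the RAII idiom that scope class plausibly implements, reusing the MiniAssembler model from the previous note:

class MiniBlockConstPoolScope {
 public:
  explicit MiniBlockConstPoolScope(MiniAssembler* assem) : assem_(assem) {
    assem_->StartBlockConstPool();   // const_pool_blocked_nesting_++
  }
  ~MiniBlockConstPoolScope() {
    assem_->EndBlockConstPool();     // const_pool_blocked_nesting_--
  }
 private:
  MiniAssembler* assem_;
  // The real class also disallows copying (DISALLOW_IMPLICIT_CONSTRUCTORS).
  MiniBlockConstPoolScope(const MiniBlockConstPoolScope&);
  void operator=(const MiniBlockConstPoolScope&);
};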
deps/v8/src/arm/assembler-thumb2.h (vendored, 2 lines changed)

@@ -898,7 +898,7 @@ class Assembler : public Malloced {
 
   int pc_offset() const { return pc_ - buffer_; }
   int current_position() const { return current_position_; }
-  int current_statement_position() const { return current_position_; }
+  int current_statement_position() const { return current_statement_position_; }
 
  protected:
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
deps/v8/src/arm/codegen-arm.cc (vendored, 474 lines changed)

@@ -351,17 +351,17 @@ void CodeGenerator::Generate(CompilationInfo* info) {
     int32_t sp_delta = (scope()->num_parameters() + 1) * kPointerSize;
     masm_->add(sp, sp, Operand(sp_delta));
     masm_->Jump(lr);
-  }
 
 #ifdef DEBUG
     // Check that the size of the code used for returning matches what is
-    // expected by the debugger. If the sp_delts above cannot be encoded in the
-    // add instruction the add will generate two instructions.
+    // expected by the debugger. If the sp_delts above cannot be encoded in
+    // the add instruction the add will generate two instructions.
     int return_sequence_length =
         masm_->InstructionsGeneratedSince(&check_exit_codesize);
     CHECK(return_sequence_length == Assembler::kJSReturnSequenceLength ||
           return_sequence_length == Assembler::kJSReturnSequenceLength + 1);
 #endif
+  }
 }
 
 // Adjust for function-level loop nesting.
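The comment fixed in this hunk alludes to ARM's immediate encoding: a data-processing immediate is an 8-bit value rotated right by an even amount, so a large sp_delta can force the add to be split into two instructions, changing the return-sequence length the debugger must accept. A sketch of the encodability test (cf. Assembler::ImmediateFitsAddrMode1Instruction, named in the assembler diff above; this standalone version is an assumption-laden rewrite, not the real code):

#include <cstdint>

bool FitsArmImmediate(uint32_t imm) {
  for (int rot = 0; rot < 32; rot += 2) {
    // Rotating the candidate left by rot undoes a right-rotation by rot.
    uint32_t v = (rot == 0) ? imm : ((imm << rot) | (imm >> (32 - rot)));
    if (v <= 0xffu) return true;  // fits in the 8-bit immediate field
  }
  return false;
}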
@@ -570,9 +570,9 @@ void CodeGenerator::Load(Expression* expr) {
 
 
 void CodeGenerator::LoadGlobal() {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
-  __ ldr(r0, GlobalObject());
-  frame_->EmitPush(r0);
+  Register reg = frame_->GetTOSRegister();
+  __ ldr(reg, GlobalObject());
+  frame_->EmitPush(reg);
 }
 
 
@@ -619,7 +619,7 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
     __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
     __ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
     frame_->Adjust(3);
-    __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
+    __ Push(r2, r1, r0);
     frame_->CallStub(&stub, 3);
     frame_->EmitPush(r0);
   }
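This stm-to-Push rewrite recurs throughout the commit. The arguments to Push are listed in push order, so the first argument ends up deepest on the stack; with stm db_w storing the lowest-numbered register at the lowest address (the new stack top), a single stm matches sequential pushes exactly when the registers are given in descending order, as they are in every call site here. A simplified sketch of the two-register overload's plausible shape (the real helper lives in the ARM macro-assembler and presumably has more overloads and fallbacks):

void MacroAssembler::Push(Register src1, Register src2) {
  ASSERT(!src1.is(src2));
  // One stm is smaller and faster than two pushes, but only preserves the
  // push order when the register numbers are strictly descending.
  ASSERT(src1.code() > src2.code());
  stm(db_w, sp, src1.bit() | src2.bit());
}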
@@ -687,7 +687,6 @@ Reference::~Reference() {
 
 
 void CodeGenerator::LoadReference(Reference* ref) {
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ LoadReference");
   Expression* e = ref->expression();
   Property* property = e->AsProperty();
@@ -696,11 +695,11 @@ void CodeGenerator::LoadReference(Reference* ref) {
   if (property != NULL) {
     // The expression is either a property or a variable proxy that rewrites
     // to a property.
-    LoadAndSpill(property->obj());
+    Load(property->obj());
     if (property->key()->IsPropertyName()) {
       ref->set_type(Reference::NAMED);
     } else {
-      LoadAndSpill(property->key());
+      Load(property->key());
       ref->set_type(Reference::KEYED);
     }
   } else if (var != NULL) {
@@ -715,6 +714,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
     }
   } else {
     // Anything else is a runtime error.
+    VirtualFrame::SpilledScope spilled_scope(frame_);
     LoadAndSpill(e);
     frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
   }
@@ -1527,6 +1527,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
   LoadAndSpill(applicand);
   Handle<String> name = Factory::LookupAsciiSymbol("apply");
   __ mov(r2, Operand(name));
+  __ ldr(r0, MemOperand(sp, 0));
   frame_->CallLoadIC(RelocInfo::CODE_TARGET);
   frame_->EmitPush(r0);
 
@@ -2948,9 +2949,10 @@ void CodeGenerator::VisitConditional(Conditional* node) {
 
 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
   if (slot->type() == Slot::LOOKUP) {
-    VirtualFrame::SpilledScope spilled_scope(frame_);
     ASSERT(slot->var()->is_dynamic());
 
+    // JumpTargets do not yet support merging frames so the frame must be
+    // spilled when jumping to these targets.
     JumpTarget slow;
     JumpTarget done;
 
@@ -2960,16 +2962,18 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
    // perform a runtime call for all variables in the scope
    // containing the eval.
    if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
-      LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow);
+      LoadFromGlobalSlotCheckExtensions(slot, typeof_state, &slow);
      // If there was no control flow to slow, we can exit early.
      if (!slow.is_linked()) {
        frame_->EmitPush(r0);
        return;
      }
+      frame_->SpillAll();
 
      done.Jump();
 
    } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
+      frame_->SpillAll();
      Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
      // Only generate the fast case for locals that rewrite to slots.
      // This rules out argument loads.
@@ -2992,6 +2996,7 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
    }
 
    slow.Bind();
+    VirtualFrame::SpilledScope spilled_scope(frame_);
    frame_->EmitPush(cp);
    __ mov(r0, Operand(slot->var()->name()));
    frame_->EmitPush(r0);
@@ -3143,16 +3148,17 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
 
 void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                                       TypeofState typeof_state,
-                                                      Register tmp,
-                                                      Register tmp2,
                                                       JumpTarget* slow) {
   // Check that no extension objects have been created by calls to
   // eval from the current scope to the global scope.
+  Register tmp = frame_->scratch0();
+  Register tmp2 = frame_->scratch1();
   Register context = cp;
   Scope* s = scope();
   while (s != NULL) {
     if (s->num_heap_slots() > 0) {
       if (s->calls_eval()) {
+        frame_->SpillAll();
         // Check that extension is NULL.
         __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX));
         __ tst(tmp2, tmp2);
@@ -3170,6 +3176,7 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
   }
 
   if (s->is_eval_scope()) {
+    frame_->SpillAll();
     Label next, fast;
     __ Move(tmp, context);
     __ bind(&next);
@@ -3192,6 +3199,7 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
   // Load the global object.
   LoadGlobal();
   // Setup the name register and call load IC.
+  frame_->SpillAllButCopyTOSToR0();
   __ mov(r2, Operand(slot->var()->name()));
   frame_->CallLoadIC(typeof_state == INSIDE_TYPEOF
                          ? RelocInfo::CODE_TARGET
@@ -3524,7 +3532,6 @@ void CodeGenerator::VisitProperty(Property* node) {
 #ifdef DEBUG
   int original_height = frame_->height();
 #endif
-  VirtualFrame::SpilledScope spilled_scope(frame_);
   Comment cmnt(masm_, "[ Property");
 
   { Reference property(this, node);
@@ -3703,7 +3710,7 @@ void CodeGenerator::VisitCall(Call* node) {
 
       LoadAndSpill(property->obj());
       LoadAndSpill(property->key());
-      EmitKeyedLoad(false);
+      EmitKeyedLoad();
       frame_->Drop();  // key
       // Put the function below the receiver.
       if (property->is_synthetic()) {
@@ -4437,8 +4444,7 @@ class DeferredSearchCache: public DeferredCode {
 
 
 void DeferredSearchCache::Generate() {
-  __ push(cache_);
-  __ push(key_);
+  __ Push(cache_, key_);
   __ CallRuntime(Runtime::kGetFromCache, 2);
   if (!dst_.is(r0)) {
     __ mov(dst_, r0);
@@ -5231,34 +5237,105 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
     set_comment("[ DeferredReferenceGetNamedValue");
   }
 
-  virtual void BeforeGenerate();
   virtual void Generate();
-  virtual void AfterGenerate();
 
  private:
   Handle<String> name_;
 };
 
 
-void DeferredReferenceGetNamedValue::BeforeGenerate() {
-  __ StartBlockConstPool();
-}
-
-
 void DeferredReferenceGetNamedValue::Generate() {
-  __ IncrementCounter(&Counters::named_load_inline_miss, 1, r1, r2);
-  // Setup the name register and call load IC.
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
+  __ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
+  __ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
+
+  // Setup the registers and call load IC.
+  // On entry to this deferred code, r0 is assumed to already contain the
+  // receiver from the top of the stack.
   __ mov(r2, Operand(name_));
-  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
-  __ Call(ic, RelocInfo::CODE_TARGET);
-  // The call must be followed by a nop(1) instruction to indicate that the
-  // inobject has been inlined.
-  __ nop(NAMED_PROPERTY_LOAD_INLINED);
+
+  // The rest of the instructions in the deferred code must be together.
+  { Assembler::BlockConstPoolScope block_const_pool(masm_);
+    Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // The call must be followed by a nop(1) instruction to indicate that the
+    // in-object has been inlined.
+    __ nop(PROPERTY_ACCESS_INLINED);
+
+    // Block the constant pool for one more instruction after leaving this
+    // constant pool block scope to include the branch instruction ending the
+    // deferred code.
+    __ BlockConstPoolFor(1);
+  }
 }
 
 
-void DeferredReferenceGetNamedValue::AfterGenerate() {
-  __ EndBlockConstPool();
+class DeferredReferenceGetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceGetKeyedValue() {
+    set_comment("[ DeferredReferenceGetKeyedValue");
+  }
+
+  virtual void Generate();
+};
+
+
+void DeferredReferenceGetKeyedValue::Generate() {
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
+  __ DecrementCounter(&Counters::keyed_load_inline, 1, scratch1, scratch2);
+  __ IncrementCounter(&Counters::keyed_load_inline_miss, 1, scratch1, scratch2);
+
+  // The rest of the instructions in the deferred code must be together.
+  { Assembler::BlockConstPoolScope block_const_pool(masm_);
+    // Call keyed load IC. It has all arguments on the stack.
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // The call must be followed by a nop instruction to indicate that the
+    // keyed load has been inlined.
+    __ nop(PROPERTY_ACCESS_INLINED);
+
+    // Block the constant pool for one more instruction after leaving this
+    // constant pool block scope to include the branch instruction ending the
+    // deferred code.
+    __ BlockConstPoolFor(1);
+  }
+}
+
+
+class DeferredReferenceSetKeyedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetKeyedValue() {
+    set_comment("[ DeferredReferenceSetKeyedValue");
+  }
+
+  virtual void Generate();
+};
+
+
+void DeferredReferenceSetKeyedValue::Generate() {
+  Register scratch1 = VirtualFrame::scratch0();
+  Register scratch2 = VirtualFrame::scratch1();
+  __ DecrementCounter(&Counters::keyed_store_inline, 1, scratch1, scratch2);
+  __ IncrementCounter(
+      &Counters::keyed_store_inline_miss, 1, scratch1, scratch2);
+
+  // The rest of the instructions in the deferred code must be together.
+  { Assembler::BlockConstPoolScope block_const_pool(masm_);
+    // Call keyed load IC. It has receiver amd key on the stack and the value to
+    // store in r0.
+    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // The call must be followed by a nop instruction to indicate that the
+    // keyed store has been inlined.
+    __ nop(PROPERTY_ACCESS_INLINED);
+
+    // Block the constant pool for one more instruction after leaving this
+    // constant pool block scope to include the branch instruction ending the
+    // deferred code.
+    __ BlockConstPoolFor(1);
+  }
 }
 
 
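The rewrite above replaces the BeforeGenerate/AfterGenerate virtual hooks, which bracketed all of the deferred code with Start/EndBlockConstPool, with a BlockConstPoolScope around only the patchable call/nop pair plus BlockConstPoolFor(1) for the branch the DeferredCode framework appends after Generate() returns. Reusing the MiniAssembler and MiniBlockConstPoolScope sketches from the assembler notes above, the shape of that guard is roughly (instruction words are placeholders):

void EmitPatchableTail(MiniAssembler* masm) {
  MiniBlockConstPoolScope scope_stand_in(masm);  // Start/EndBlockConstPool
  masm->Emit(0 /* call to the IC stub; patched later */);
  masm->Emit(0 /* marker nop, e.g. PROPERTY_ACCESS_INLINED */);
  masm->BlockConstPoolFor(1);  // keep the pool out of the trailing branch
}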
@@ -5266,63 +5343,231 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
   if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
     Comment cmnt(masm(), "[ Load from named Property");
     // Setup the name register and call load IC.
+    frame_->SpillAllButCopyTOSToR0();
     __ mov(r2, Operand(name));
     frame_->CallLoadIC(is_contextual
                            ? RelocInfo::CODE_TARGET_CONTEXT
                            : RelocInfo::CODE_TARGET);
   } else {
-    // Inline the inobject property case.
+    // Inline the in-object property case.
     Comment cmnt(masm(), "[ Inlined named property load");
 
-    DeferredReferenceGetNamedValue* deferred =
-        new DeferredReferenceGetNamedValue(name);
+    // Counter will be decremented in the deferred code. Placed here to avoid
+    // having it in the instruction stream below where patching will occur.
+    __ IncrementCounter(&Counters::named_load_inline, 1,
+                        frame_->scratch0(), frame_->scratch1());
+
     // The following instructions are the inlined load of an in-object property.
     // Parts of this code is patched, so the exact instructions generated needs
     // to be fixed. Therefore the instruction pool is blocked when generating
     // this code
+
+    // Load the receiver from the stack.
+    frame_->SpillAllButCopyTOSToR0();
+
+    DeferredReferenceGetNamedValue* deferred =
+        new DeferredReferenceGetNamedValue(name);
+
 #ifdef DEBUG
-    int kInlinedNamedLoadInstructions = 8;
+    int kInlinedNamedLoadInstructions = 7;
     Label check_inlined_codesize;
     masm_->bind(&check_inlined_codesize);
 #endif
-    { Assembler::BlockConstPoolScope block_const_pool(masm_);
-      // Load the receiver from the stack.
-      __ ldr(r1, MemOperand(sp, 0));
 
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
       // Check that the receiver is a heap object.
-      __ tst(r1, Operand(kSmiTagMask));
+      __ tst(r0, Operand(kSmiTagMask));
      deferred->Branch(eq);
 
      // Check the map. The null map used below is patched by the inline cache
      // code.
-      __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+      __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
      __ mov(r3, Operand(Factory::null_value()));
      __ cmp(r2, r3);
      deferred->Branch(ne);
 
-      // Use initially use an invalid index. The index will be patched by the
+      // Initially use an invalid index. The index will be patched by the
      // inline cache code.
-      __ ldr(r0, MemOperand(r1, 0));
+      __ ldr(r0, MemOperand(r0, 0));
+
+      // Make sure that the expected number of instructions are generated.
+      ASSERT_EQ(kInlinedNamedLoadInstructions,
+                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
    }
 
-    // Make sure that the expected number of instructions are generated.
-    ASSERT_EQ(kInlinedNamedLoadInstructions,
-              masm_->InstructionsGeneratedSince(&check_inlined_codesize));
-
-    __ IncrementCounter(&Counters::named_load_inline, 1, r1, r2);
    deferred->BindExit();
  }
 }
 
 
-void CodeGenerator::EmitKeyedLoad(bool is_global) {
-  Comment cmnt(masm_, "[ Load from keyed Property");
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  RelocInfo::Mode rmode = is_global
-      ? RelocInfo::CODE_TARGET_CONTEXT
-      : RelocInfo::CODE_TARGET;
-  frame_->CallCodeObject(ic, rmode, 0);
+void CodeGenerator::EmitKeyedLoad() {
+  if (loop_nesting() == 0) {
+    VirtualFrame::SpilledScope spilled(frame_);
+    Comment cmnt(masm_, "[ Load from keyed property");
+    frame_->CallKeyedLoadIC();
+  } else {
+    // Inline the keyed load.
+    Comment cmnt(masm_, "[ Inlined load from keyed property");
+
+    // Counter will be decremented in the deferred code. Placed here to avoid
+    // having it in the instruction stream below where patching will occur.
+    __ IncrementCounter(&Counters::keyed_load_inline, 1,
+                        frame_->scratch0(), frame_->scratch1());
+
+    // Load the receiver and key from the stack.
+    frame_->SpillAllButCopyTOSToR1R0();
+    Register receiver = r0;
+    Register key = r1;
+    VirtualFrame::SpilledScope spilled(frame_);
+
+    DeferredReferenceGetKeyedValue* deferred =
+        new DeferredReferenceGetKeyedValue();
+
+    // Check that the receiver is a heap object.
+    __ tst(receiver, Operand(kSmiTagMask));
+    deferred->Branch(eq);
+
+    // The following instructions are the part of the inlined load keyed
+    // property code which can be patched. Therefore the exact number of
+    // instructions generated need to be fixed, so the constant pool is blocked
+    // while generating this code.
+#ifdef DEBUG
+    int kInlinedKeyedLoadInstructions = 19;
+    Label check_inlined_codesize;
+    masm_->bind(&check_inlined_codesize);
+#endif
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      Register scratch1 = VirtualFrame::scratch0();
+      Register scratch2 = VirtualFrame::scratch1();
+      // Check the map. The null map used below is patched by the inline cache
+      // code.
+      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+      __ mov(scratch2, Operand(Factory::null_value()));
+      __ cmp(scratch1, scratch2);
+      deferred->Branch(ne);
+
+      // Check that the key is a smi.
+      __ tst(key, Operand(kSmiTagMask));
+      deferred->Branch(ne);
+
+      // Get the elements array from the receiver and check that it
+      // is not a dictionary.
+      __ ldr(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+      __ ldr(scratch2, FieldMemOperand(scratch1, JSObject::kMapOffset));
+      __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+      __ cmp(scratch2, ip);
+      deferred->Branch(ne);
+
+      // Check that key is within bounds. Use unsigned comparison to handle
+      // negative keys.
+      __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+      __ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
+      deferred->Branch(ls);  // Unsigned less equal.
+
+      // Load and check that the result is not the hole (key is a smi).
+      __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+      __ add(scratch1,
+             scratch1,
+             Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ ldr(r0,
+             MemOperand(scratch1, key, LSL,
+                        kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
+      __ cmp(r0, scratch2);
+      // This is the only branch to deferred where r0 and r1 do not contain the
+      // receiver and key. We can't just load undefined here because we have to
+      // check the prototype.
+      deferred->Branch(eq);
+
+      // Make sure that the expected number of instructions are generated.
+      ASSERT_EQ(kInlinedKeyedLoadInstructions,
+                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
+    }
+
+    deferred->BindExit();
+  }
+}
+
+
+void CodeGenerator::EmitKeyedStore(StaticType* key_type) {
+  frame_->AssertIsSpilled();
+  // Generate inlined version of the keyed store if the code is in a loop
+  // and the key is likely to be a smi.
+  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
+    // Inline the keyed store.
+    Comment cmnt(masm_, "[ Inlined store to keyed property");
+
+    DeferredReferenceSetKeyedValue* deferred =
+        new DeferredReferenceSetKeyedValue();
+
+    // Counter will be decremented in the deferred code. Placed here to avoid
+    // having it in the instruction stream below where patching will occur.
+    __ IncrementCounter(&Counters::keyed_store_inline, 1,
+                        frame_->scratch0(), frame_->scratch1());
+
+    // Check that the value is a smi. As this inlined code does not set the
+    // write barrier it is only possible to store smi values.
+    __ tst(r0, Operand(kSmiTagMask));
+    deferred->Branch(ne);
+
+    // Load the key and receiver from the stack.
+    __ ldr(r1, MemOperand(sp, 0));
+    __ ldr(r2, MemOperand(sp, kPointerSize));
+
+    // Check that the key is a smi.
+    __ tst(r1, Operand(kSmiTagMask));
+    deferred->Branch(ne);
+
+    // Check that the receiver is a heap object.
+    __ tst(r2, Operand(kSmiTagMask));
+    deferred->Branch(eq);
+
+    // Check that the receiver is a JSArray.
+    __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
+    deferred->Branch(ne);
+
+    // Check that the key is within bounds. Both the key and the length of
+    // the JSArray are smis. Use unsigned comparison to handle negative keys.
+    __ ldr(r3, FieldMemOperand(r2, JSArray::kLengthOffset));
+    __ cmp(r3, r1);
+    deferred->Branch(ls);  // Unsigned less equal.
+
+    // The following instructions are the part of the inlined store keyed
+    // property code which can be patched. Therefore the exact number of
+    // instructions generated need to be fixed, so the constant pool is blocked
+    // while generating this code.
+#ifdef DEBUG
+    int kInlinedKeyedStoreInstructions = 7;
+    Label check_inlined_codesize;
+    masm_->bind(&check_inlined_codesize);
+#endif
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      // Get the elements array from the receiver and check that it
+      // is not a dictionary.
+      __ ldr(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
+      __ ldr(r4, FieldMemOperand(r3, JSObject::kMapOffset));
+      // Read the fixed array map from the constant pool (not from the root
+      // array) so that the value can be patched. When debugging, we patch this
+      // comparison to always fail so that we will hit the IC call in the
+      // deferred code which will allow the debugger to break for fast case
+      // stores.
+      __ mov(r5, Operand(Factory::fixed_array_map()));
+      __ cmp(r4, r5);
+      deferred->Branch(ne);
+
+      // Store the value.
+      __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+      __ str(r0, MemOperand(r3, r1, LSL,
+                            kPointerSizeLog2 - (kSmiTagSize + kSmiShiftSize)));
+
+      // Make sure that the expected number of instructions are generated.
+      ASSERT_EQ(kInlinedKeyedStoreInstructions,
+                masm_->InstructionsGeneratedSince(&check_inlined_codesize));
+    }
+
+    deferred->BindExit();
+  } else {
+    frame()->CallKeyedStoreIC();
+  }
 }
 
 
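One detail worth calling out in the inlined loads and stores above: bounds are checked with a single unsigned comparison (the cmp followed by a branch on ls). Reinterpreting a negative smi key as unsigned makes it larger than any valid length, so one compare rejects both out-of-range and negative keys. A self-contained sketch of that check:

#include <cstdint>

// Why "cmp length, key" plus branch-if-ls also rejects negative keys:
// viewed as unsigned, any negative 32-bit key exceeds every valid length.
bool KeyInBounds(int32_t key, uint32_t length) {
  return static_cast<uint32_t>(key) < length;  // false for key < 0
}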
@@ -5381,12 +5626,8 @@ void Reference::GetValue() {
     }
 
     case KEYED: {
-      // TODO(181): Implement inlined version of array indexing once
-      // loop nesting is properly tracked on ARM.
       ASSERT(property != NULL);
-      Variable* var = expression_->AsVariableProxy()->AsVariable();
-      ASSERT(var == NULL || var->is_global());
-      cgen_->EmitKeyedLoad(var != NULL);
+      cgen_->EmitKeyedLoad();
       cgen_->frame()->EmitPush(r0);
       break;
     }
@@ -5443,10 +5684,8 @@ void Reference::SetValue(InitState init_state) {
       ASSERT(property != NULL);
       cgen_->CodeForSourcePosition(property->position());
 
-      // Call IC code.
-      Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-      frame->EmitPop(r0);  // value
-      frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+      frame->EmitPop(r0);  // Value.
+      cgen_->EmitKeyedStore(property->key()->type());
       frame->EmitPush(r0);
       cgen_->UnloadReference(this);
       break;
@@ -5497,8 +5736,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
 
   // Create a new closure through the slower runtime call.
   __ bind(&gc);
-  __ push(cp);
-  __ push(r3);
+  __ Push(cp, r3);
   __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
 }
 
@@ -6145,20 +6383,12 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                          Register result,
                                                          Register scratch1,
                                                          Register scratch2,
+                                                         Register scratch3,
                                                          bool object_is_smi,
                                                          Label* not_found) {
-  // Currently only lookup for smis. Check for smi if object is not known to be
-  // a smi.
-  if (!object_is_smi) {
-    ASSERT(kSmiTag == 0);
-    __ tst(object, Operand(kSmiTagMask));
-    __ b(ne, not_found);
-  }
-
   // Use of registers. Register result is used as a temporary.
   Register number_string_cache = result;
-  Register mask = scratch1;
-  Register scratch = scratch2;
+  Register mask = scratch3;
 
   // Load the number string cache.
   __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
@@ -6171,9 +6401,55 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   __ sub(mask, mask, Operand(1));  // Make mask.
 
   // Calculate the entry in the number string cache. The hash value in the
-  // number string cache for smis is just the smi value.
-  __ and_(scratch, mask, Operand(object, ASR, 1));
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Label is_smi;
+  Label load_result_from_cache;
+  if (!object_is_smi) {
+    __ BranchOnSmi(object, &is_smi);
+    if (CpuFeatures::IsSupported(VFP3)) {
+      CpuFeatures::Scope scope(VFP3);
+      __ CheckMap(object,
+                  scratch1,
+                  Factory::heap_number_map(),
+                  not_found,
+                  true);
+
+      ASSERT_EQ(8, kDoubleSize);
+      __ add(scratch1,
+             object,
+             Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+      __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+      __ eor(scratch1, scratch1, Operand(scratch2));
+      __ and_(scratch1, scratch1, Operand(mask));
+
+      // Calculate address of entry in string cache: each entry consists
+      // of two pointer sized fields.
+      __ add(scratch1,
+             number_string_cache,
+             Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+      Register probe = mask;
+      __ ldr(probe,
+             FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+      __ BranchOnSmi(probe, not_found);
+      __ sub(scratch2, object, Operand(kHeapObjectTag));
+      __ vldr(d0, scratch2, HeapNumber::kValueOffset);
+      __ sub(probe, probe, Operand(kHeapObjectTag));
+      __ vldr(d1, probe, HeapNumber::kValueOffset);
+      __ vcmp(d0, d1);
+      __ vmrs(pc);
+      __ b(ne, not_found);  // The cache did not contain this value.
+      __ b(&load_result_from_cache);
+    } else {
+      __ b(not_found);
+    }
+  }
+
+  __ bind(&is_smi);
+  Register scratch = scratch1;
+  __ and_(scratch, mask, Operand(object, ASR, 1));
   // Calculate address of entry in string cache: each entry consists
   // of two pointer sized fields.
   __ add(scratch,
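The new heap-number path hashes a double by xoring its two 32-bit halves and masking to the power-of-two cache size, exactly what the ldm/eor/and_ sequence above computes (per the comment citing Heap::GetNumberStringCache); for smis the index is just the untagged integer anded with the mask. A self-contained sketch of the index computation:

#include <cstdint>
#include <cstring>

uint32_t DoubleCacheIndex(double value, uint32_t mask) {
  uint32_t halves[2];
  std::memcpy(halves, &value, sizeof(halves));  // the same bits ldm loads
  return (halves[0] ^ halves[1]) & mask;
}
// Each cache entry is two pointers wide, hence the later
// "LSL, kPointerSizeLog2 + 1" when scaling the index to an address.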
@@ -6181,15 +6457,15 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
          Operand(scratch, LSL, kPointerSizeLog2 + 1));
 
   // Check if the entry is the smi we are looking for.
-  Register object1 = scratch1;
-  __ ldr(object1, FieldMemOperand(scratch, FixedArray::kHeaderSize));
-  __ cmp(object, object1);
+  Register probe = mask;
+  __ ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  __ cmp(object, probe);
   __ b(ne, not_found);
 
   // Get the result from the cache.
+  __ bind(&load_result_from_cache);
   __ ldr(result,
          FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
 
   __ IncrementCounter(&Counters::number_to_string_native,
                       1,
                       scratch1,
@@ -6203,13 +6479,13 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
   __ ldr(r1, MemOperand(sp, 0));
 
   // Generate code to lookup number in the number string cache.
-  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, false, &runtime);
+  GenerateLookupNumberStringCache(masm, r1, r0, r2, r3, r4, false, &runtime);
   __ add(sp, sp, Operand(1 * kPointerSize));
   __ Ret();
 
   __ bind(&runtime);
   // Handle number to string in the runtime system if not found in the cache.
-  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
+  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
 }
 
 
@@ -6328,8 +6604,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
 
   __ bind(&slow);
 
-  __ push(r1);
-  __ push(r0);
+  __ Push(r1, r0);
   // Figure out which native to call and setup the arguments.
   Builtins::JavaScript native;
   if (cc_ == eq) {
@@ -6594,8 +6869,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
   __ bind(&slow);
 
   // Push arguments to the stack
-  __ push(r1);
-  __ push(r0);
+  __ Push(r1, r0);
 
   if (Token::ADD == op_) {
     // Test for string arguments before calling runtime.
@@ -6624,7 +6898,7 @@ void GenericBinaryOpStub::HandleBinaryOpSlowCases(
     // First argument is a string, second is a smi. Try to lookup the number
     // string for the smi in the number string cache.
     NumberToStringStub::GenerateLookupNumberStringCache(
-        masm, r0, r2, r4, r5, true, &string1);
+        masm, r0, r2, r4, r5, r6, true, &string1);
 
     // Replace second argument on stack and tailcall string add stub to make
     // the result.
@@ -6849,8 +7123,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
 
   // If all else failed then we go to the runtime system.
   __ bind(&slow);
-  __ push(lhs);  // restore stack
-  __ push(rhs);
+  __ Push(lhs, rhs);  // Restore stack.
   switch (op_) {
     case Token::BIT_OR:
       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
@@ -7248,8 +7521,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
   Label get_result;
 
-  __ push(r1);
-  __ push(r0);
+  __ Push(r1, r0);
 
   // Internal frame is necessary to handle exceptions properly.
   __ EnterInternalFrame();
@@ -7723,7 +7995,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
   __ mov(r6, Operand(Smi::FromInt(marker)));
   __ mov(r5, Operand(ExternalReference(Top::k_c_entry_fp_address)));
   __ ldr(r5, MemOperand(r5));
-  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | r8.bit());
+  __ Push(r8, r7, r6, r5);
 
   // Setup frame pointer for the frame to be pushed.
   __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
deps/v8/src/arm/codegen-arm.h (vendored, 11 lines changed)

@@ -157,7 +157,7 @@ enum ArgumentsAllocationMode {
 // states of the generated code.
 enum NopMarkerTypes {
   NON_MARKING_NOP = 0,
-  NAMED_PROPERTY_LOAD_INLINED
+  PROPERTY_ACCESS_INLINED
 };
 
 
@@ -318,12 +318,14 @@ class CodeGenerator: public AstVisitor {
 
   // Load a keyed property, leaving it in r0. The receiver and key are
   // passed on the stack, and remain there.
-  void EmitKeyedLoad(bool is_global);
+  void EmitKeyedLoad();
+
+  // Store a keyed property. Key and receiver are on the stack and the value is
+  // in r0. Result is returned in r0.
+  void EmitKeyedStore(StaticType* key_type);
 
   void LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                          TypeofState typeof_state,
-                                         Register tmp,
-                                         Register tmp2,
                                          JumpTarget* slow);
 
   // Special code for typeof expressions: Unfortunately, we must
@@ -839,6 +841,7 @@ class NumberToStringStub: public CodeStub {
                                               Register result,
                                               Register scratch1,
                                               Register scratch2,
+                                              Register scratch3,
                                               bool object_is_smi,
                                               Label* not_found);
 
deps/v8/src/arm/debug-arm.cc (vendored, 2 lines changed)

@@ -133,9 +133,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
 void Debug::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC load (from ic-arm.cc).
   // ----------- S t a t e -------------
-  //  -- r0    : receiver
   //  -- r2    : name
   //  -- lr    : return address
+  //  -- r0    : receiver
   //  -- [sp]  : receiver
   // -----------------------------------
   // Registers r0 and r2 contain objects that need to be pushed on the
deps/v8/src/arm/full-codegen-arm.cc (vendored, 15 lines changed)

@@ -125,7 +125,7 @@ void FullCodeGenerator::Generate(CompilationInfo* info, Mode mode) {
       __ add(r2, fp,
             Operand(StandardFrameConstants::kCallerSPOffset + offset));
       __ mov(r1, Operand(Smi::FromInt(scope()->num_parameters())));
-      __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+      __ Push(r3, r2, r1);
 
       // Arguments to ArgumentsAccessStub:
       //   function, receiver address, parameter count.
@@ -696,8 +696,8 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
     Comment cmnt(masm_, "Global variable");
     // Use inline caching. Variable name is passed in r2 and the global
     // object on the stack.
-    __ ldr(ip, CodeGenerator::GlobalObject());
-    __ push(ip);
+    __ ldr(r0, CodeGenerator::GlobalObject());
+    __ push(r0);
     __ mov(r2, Operand(var->name()));
     Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
     __ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
@@ -739,7 +739,7 @@ void FullCodeGenerator::EmitVariableLoad(Variable* var,
       __ mov(r1, Operand(key_literal->handle()));
 
       // Push both as arguments to ic.
-      __ stm(db_w, sp, r2.bit() | r1.bit());
+      __ Push(r2, r1);
 
       // Do a keyed property load.
       Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
@@ -771,7 +771,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
   __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
   __ mov(r2, Operand(expr->pattern()));
   __ mov(r1, Operand(expr->flags()));
-  __ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
+  __ Push(r4, r3, r2, r1);
   __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   __ bind(&done);
   Apply(context_, r0);
@@ -785,7 +785,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
   __ mov(r1, Operand(expr->constant_properties()));
   __ mov(r0, Operand(Smi::FromInt(expr->fast_elements() ? 1 : 0)));
-  __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit() | r0.bit());
+  __ Push(r3, r2, r1, r0);
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
@@ -860,7 +860,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
   __ mov(r1, Operand(expr->constant_elements()));
-  __ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
+  __ Push(r3, r2, r1);
   if (expr->depth() > 1) {
     __ CallRuntime(Runtime::kCreateArrayLiteral, 3);
   } else {
@@ -997,6 +997,7 @@ void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
   __ mov(r2, Operand(key->handle()));
+  __ ldr(r0, MemOperand(sp, 0));
   Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
   __ Call(ic, RelocInfo::CODE_TARGET);
 }
177
deps/v8/src/arm/ic-arm.cc
vendored
177
deps/v8/src/arm/ic-arm.cc
vendored
@ -61,6 +61,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
|
|||||||
// dictionary.
|
// dictionary.
|
||||||
//
|
//
|
||||||
// r2 - holds the name of the property and is unchanged.
|
// r2 - holds the name of the property and is unchanged.
|
||||||
|
// r4 - used as temporary.
|
||||||
|
|
||||||
Label done;
|
Label done;
|
||||||
|
|
||||||
@@ -108,25 +109,25 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
   static const int kProbes = 4;
   for (int i = 0; i < kProbes; i++) {
     // Compute the masked index: (hash + i + i * i) & mask.
-    __ ldr(t1, FieldMemOperand(r2, String::kHashFieldOffset));
+    __ ldr(r4, FieldMemOperand(r2, String::kHashFieldOffset));
     if (i > 0) {
       // Add the probe offset (i + i * i) left shifted to avoid right shifting
       // the hash in a separate instruction. The value hash + i + i * i is right
       // shifted in the following and instruction.
       ASSERT(StringDictionary::GetProbeOffset(i) <
              1 << (32 - String::kHashFieldOffset));
-      __ add(t1, t1, Operand(
+      __ add(r4, r4, Operand(
           StringDictionary::GetProbeOffset(i) << String::kHashShift));
     }
-    __ and_(t1, r3, Operand(t1, LSR, String::kHashShift));
+    __ and_(r4, r3, Operand(r4, LSR, String::kHashShift));

     // Scale the index by multiplying by the element size.
     ASSERT(StringDictionary::kEntrySize == 3);
-    __ add(t1, t1, Operand(t1, LSL, 1));  // t1 = t1 * 3
+    __ add(r4, r4, Operand(r4, LSL, 1));  // r4 = r4 * 3

     // Check if the key is identical to the name.
-    __ add(t1, t0, Operand(t1, LSL, 2));
-    __ ldr(ip, FieldMemOperand(t1, kElementsStartOffset));
+    __ add(r4, t0, Operand(r4, LSL, 2));
+    __ ldr(ip, FieldMemOperand(r4, kElementsStartOffset));
     __ cmp(r2, Operand(ip));
     if (i != kProbes - 1) {
       __ b(eq, &done);
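The only functional change in this loop is the scratch register (t1 becomes the newly documented r4), but it is the densest part of the dictionary probe. As a reference, here is a standalone model of the index computation the comments above describe, in plain C++; the hash value and capacity are invented, and V8's actual StringDictionary::GetProbeOffset may differ in detail:

    #include <cstdint>
    #include <cstdio>

    // Quadratic probing as described in the comments above: probe i inspects
    // entry (hash + i + i*i) & mask, and the entry index is then scaled by
    // the entry size (3 words: key, value, property details).
    int main() {
      const uint32_t capacity = 64;        // must be a power of two
      const uint32_t mask = capacity - 1;
      const uint32_t hash = 0x2748ACE1u;   // hypothetical name hash
      const int kProbes = 4;
      const int kEntrySize = 3;            // StringDictionary::kEntrySize
      for (int i = 0; i < kProbes; i++) {
        uint32_t index = (hash + i + i * i) & mask;
        printf("probe %d -> entry %u (word offset %u)\n",
               i, index, index * kEntrySize);
      }
      return 0;
    }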
@@ -136,13 +137,13 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
   }

   // Check that the value is a normal property.
-  __ bind(&done);  // t1 == t0 + 4*index
-  __ ldr(r3, FieldMemOperand(t1, kElementsStartOffset + 2 * kPointerSize));
+  __ bind(&done);  // r4 == t0 + 4*index
+  __ ldr(r3, FieldMemOperand(r4, kElementsStartOffset + 2 * kPointerSize));
   __ tst(r3, Operand(PropertyDetails::TypeField::mask() << kSmiTagSize));
   __ b(ne, miss);

   // Get the value at the masked, scaled index and return.
-  __ ldr(t1, FieldMemOperand(t1, kElementsStartOffset + 1 * kPointerSize));
+  __ ldr(t1, FieldMemOperand(r4, kElementsStartOffset + 1 * kPointerSize));
 }

@@ -239,12 +240,11 @@ void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss;

-  __ ldr(r0, MemOperand(sp, 0));
-
   StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -255,12 +255,11 @@ void LoadIC::GenerateStringLength(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss;

-  __ ldr(r0, MemOperand(sp, 0));
-
   StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss);
   // Cache miss: Jump to runtime.
   __ bind(&miss);
@@ -272,13 +271,11 @@ void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss;

-  // Load receiver.
-  __ ldr(r0, MemOperand(sp, 0));
-
   StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss);
   __ bind(&miss);
   StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC);
@@ -351,7 +348,8 @@ void CallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) {
 static void GenerateNormalHelper(MacroAssembler* masm,
                                  int argc,
                                  bool is_global_object,
-                                 Label* miss) {
+                                 Label* miss,
+                                 Register scratch) {
   // Search dictionary - put result in register r1.
   GenerateDictionaryLoad(masm, miss, r0, r1);

@@ -360,7 +358,7 @@ static void GenerateNormalHelper(MacroAssembler* masm,
   __ b(eq, miss);

   // Check that the value is a JSFunction.
-  __ CompareObjectType(r1, r0, r0, JS_FUNCTION_TYPE);
+  __ CompareObjectType(r1, scratch, scratch, JS_FUNCTION_TYPE);
   __ b(ne, miss);

   // Patch the receiver with the global proxy if necessary.
@@ -409,7 +407,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
   __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
   __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
   __ b(ne, &miss);
-  GenerateNormalHelper(masm, argc, true, &miss);
+  GenerateNormalHelper(masm, argc, true, &miss, r4);

   // Accessing non-global object: Check for access to global proxy.
   Label global_proxy, invoke;
@@ -422,7 +420,7 @@ void CallIC::GenerateNormal(MacroAssembler* masm, int argc) {
   __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
   __ b(ne, &miss);
   __ bind(&invoke);
-  GenerateNormalHelper(masm, argc, false, &miss);
+  GenerateNormalHelper(masm, argc, false, &miss, r4);

   // Global object access: Check access rights.
   __ bind(&global_proxy);
@@ -447,7 +445,7 @@ void CallIC::GenerateMiss(MacroAssembler* masm, int argc) {
   __ EnterInternalFrame();

   // Push the receiver and the name of the function.
-  __ stm(db_w, sp, r2.bit() | r3.bit());
+  __ Push(r3, r2);

   // Call the entry.
   __ mov(r0, Operand(2));
@@ -489,10 +487,10 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------

-  __ ldr(r0, MemOperand(sp, 0));
   // Probe the stub cache.
   Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC,
                                          NOT_IN_LOOP,
@@ -508,11 +506,11 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss, probe, global;

-  __ ldr(r0, MemOperand(sp, 0));
   // Check that the receiver isn't a smi.
   __ tst(r0, Operand(kSmiTagMask));
   __ b(eq, &miss);
@@ -551,11 +549,12 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------

-  __ ldr(r3, MemOperand(sp, 0));
-  __ stm(db_w, sp, r2.bit() | r3.bit());
+  __ mov(r3, r0);
+  __ Push(r3, r2);

   // Perform tail call to the entry.
   ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss));
@@ -563,15 +562,8 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
 }


-void LoadIC::ClearInlinedVersion(Address address) {
-  // Reset the map check of the inlined inobject property load (if present) to
-  // guarantee failure by holding an invalid map (the null value). The offset
-  // can be patched to anything.
-  PatchInlinedLoad(address, Heap::null_value(), 0);
-}
-
-
-bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+static inline bool IsInlinedICSite(Address address,
+                                   Address* inline_end_address) {
   // If the instruction after the call site is not the pseudo instruction nop1
   // then this is not related to an inlined in-object property load. The nop1
   // instruction is located just after the call to the IC in the deferred code
@@ -579,24 +571,42 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   // a branch instruction for jumping back from the deferred code.
   Address address_after_call = address + Assembler::kCallTargetAddressOffset;
   Instr instr_after_call = Assembler::instr_at(address_after_call);
-  if (!Assembler::IsNop(instr_after_call, NAMED_PROPERTY_LOAD_INLINED)) {
+  if (!Assembler::IsNop(instr_after_call, PROPERTY_ACCESS_INLINED)) {
     return false;
   }
-  ASSERT_EQ(0, RegisterAllocator::kNumRegisters);
-  Address address_after_nop1 = address_after_call + Assembler::kInstrSize;
-  Instr instr_after_nop1 = Assembler::instr_at(address_after_nop1);
-  ASSERT(Assembler::IsBranch(instr_after_nop1));
+  Address address_after_nop = address_after_call + Assembler::kInstrSize;
+  Instr instr_after_nop = Assembler::instr_at(address_after_nop);
+  ASSERT(Assembler::IsBranch(instr_after_nop));

   // Find the end of the inlined code for handling the load.
   int b_offset =
-      Assembler::GetBranchOffset(instr_after_nop1) + Assembler::kPcLoadDelta;
+      Assembler::GetBranchOffset(instr_after_nop) + Assembler::kPcLoadDelta;
   ASSERT(b_offset < 0);  // Jumping back from deferred code.
-  Address inline_end_address = address_after_nop1 + b_offset;
+  *inline_end_address = address_after_nop + b_offset;
+
+  return true;
+}
+
+
+void LoadIC::ClearInlinedVersion(Address address) {
+  // Reset the map check of the inlined in-object property load (if present) to
+  // guarantee failure by holding an invalid map (the null value). The offset
+  // can be patched to anything.
+  PatchInlinedLoad(address, Heap::null_value(), 0);
+}
+
+
+bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
+  // Find the end of the inlined code for handling the load if this is an
+  // inlined IC call site.
+  Address inline_end_address;
+  if (!IsInlinedICSite(address, &inline_end_address)) return false;

   // Patch the offset of the property load instruction (ldr r0, [r1, #+XXX]).
-  // The immediate must be represenatble in 12 bits.
+  // The immediate must be representable in 12 bits.
   ASSERT((JSObject::kMaxInstanceSize - JSObject::kHeaderSize) < (1 << 12));
-  Address ldr_property_instr_address = inline_end_address - 4;
+  Address ldr_property_instr_address =
+      inline_end_address - Assembler::kInstrSize;
   ASSERT(Assembler::IsLdrRegisterImmediate(
       Assembler::instr_at(ldr_property_instr_address)));
   Instr ldr_property_instr = Assembler::instr_at(ldr_property_instr_address);
@@ -608,29 +618,61 @@ bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
   CPU::FlushICache(ldr_property_instr_address, 1 * Assembler::kInstrSize);

   // Patch the map check.
-  Address ldr_map_instr_address = inline_end_address - 16;
+  Address ldr_map_instr_address =
+      inline_end_address - 4 * Assembler::kInstrSize;
   Assembler::set_target_address_at(ldr_map_instr_address,
                                    reinterpret_cast<Address>(map));
   return true;
 }


-void KeyedLoadIC::ClearInlinedVersion(Address address) {}
-
-
-bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
-  return false;
-}
-
-
-void KeyedStoreIC::ClearInlinedVersion(Address address) {}
-
-
-void KeyedStoreIC::RestoreInlinedVersion(Address address) {}
+void KeyedLoadIC::ClearInlinedVersion(Address address) {
+  // Reset the map check of the inlined keyed load (if present) to
+  // guarantee failure by holding an invalid map (the null value).
+  PatchInlinedLoad(address, Heap::null_value());
+}
+
+
+bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
+  Address inline_end_address;
+  if (!IsInlinedICSite(address, &inline_end_address)) return false;
+
+  // Patch the map check.
+  Address ldr_map_instr_address =
+      inline_end_address - 18 * Assembler::kInstrSize;
+  Assembler::set_target_address_at(ldr_map_instr_address,
+                                   reinterpret_cast<Address>(map));
+  return true;
+}
+
+
+void KeyedStoreIC::ClearInlinedVersion(Address address) {
+  // Insert null as the elements map to check for. This will make
+  // sure that the elements fast-case map check fails so that control
+  // flows to the IC instead of the inlined version.
+  PatchInlinedStore(address, Heap::null_value());
+}
+
+
+void KeyedStoreIC::RestoreInlinedVersion(Address address) {
+  // Restore the fast-case elements map check so that the inlined
+  // version can be used again.
+  PatchInlinedStore(address, Heap::fixed_array_map());
+}


 bool KeyedStoreIC::PatchInlinedStore(Address address, Object* map) {
-  return false;
+  // Find the end of the inlined code for handling the store if this is an
+  // inlined IC call site.
+  Address inline_end_address;
+  if (!IsInlinedICSite(address, &inline_end_address)) return false;
+
+  // Patch the map check.
+  Address ldr_map_instr_address =
+      inline_end_address - 5 * Assembler::kInstrSize;
+  Assembler::set_target_address_at(ldr_map_instr_address,
+                                   reinterpret_cast<Address>(map));
+  return true;
 }

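The restructuring above factors the call-site detection into IsInlinedICSite and gives KeyedLoadIC and KeyedStoreIC real patch logic instead of stubs. The address arithmetic is easier to see in isolation; the following toy model uses made-up addresses and offsets and is not V8's API:

    #include <cassert>
    #include <cstdio>

    // Toy model of IsInlinedICSite: a marker nop follows the IC call, then a
    // branch in deferred code whose negative offset points back at the end
    // of the inlined sequence. Patch targets sit at fixed instruction counts
    // before that end. All numeric values here are illustrative.
    int main() {
      const int kInstrSize = 4;
      const int kCallTargetAddressOffset = 4;

      int call_site = 4096;                                  // IC call site
      int after_call = call_site + kCallTargetAddressOffset; // marker nop
      int after_nop = after_call + kInstrSize;               // branch instr
      int b_offset = -64;                 // decoded from the branch
      assert(b_offset < 0);               // jumping back from deferred code
      int inline_end = after_nop + b_offset;

      // LoadIC: property load is the last instruction of the inlined code,
      // the map check sits 4 instructions before its end.
      printf("ldr to patch:       %d\n", inline_end - 1 * kInstrSize);
      printf("map check to patch: %d\n", inline_end - 4 * kInstrSize);
      return 0;
    }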
@@ -645,7 +687,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // -----------------------------------

   __ ldm(ia, sp, r2.bit() | r3.bit());
-  __ stm(db_w, sp, r2.bit() | r3.bit());
+  __ Push(r3, r2);

   ExternalReference ref = ExternalReference(IC_Utility(kKeyedLoadIC_Miss));
   __ TailCallExternalReference(ref, 2, 1);
@@ -660,7 +702,7 @@ void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // -----------------------------------

   __ ldm(ia, sp, r2.bit() | r3.bit());
-  __ stm(db_w, sp, r2.bit() | r3.bit());
+  __ Push(r3, r2);

   __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
 }
@@ -778,7 +820,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
   __ bind(&index_ok);
   // Duplicate receiver and key since they are expected on the stack after
   // the KeyedLoadIC call.
-  __ stm(db_w, sp, r0.bit() | r1.bit());
+  __ Push(r1, r0);
   __ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_JS);

   __ bind(&miss);
@@ -1094,8 +1136,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
   __ b(ne, &slow);

   // Everything is fine, call runtime.
-  __ push(r1);  // receiver
-  __ push(r0);  // key
+  __ Push(r1, r0);  // Receiver, key.

   // Perform tail call to the entry.
   __ TailCallExternalReference(ExternalReference(
@@ -1115,7 +1156,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
   // -----------------------------------

   __ ldm(ia, sp, r2.bit() | r3.bit());
-  __ stm(db_w, sp, r0.bit() | r2.bit() | r3.bit());
+  __ Push(r3, r2, r0);

   ExternalReference ref = ExternalReference(IC_Utility(kKeyedStoreIC_Miss));
   __ TailCallExternalReference(ref, 3, 1);
@@ -1130,7 +1171,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm) {
   //  -- sp[1]  : receiver
   // -----------------------------------
   __ ldm(ia, sp, r1.bit() | r3.bit());  // r0 == value, r1 == key, r3 == object
-  __ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
+  __ Push(r3, r1, r0);

   __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
 }
@@ -1684,8 +1725,7 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) {
   //  -- lr    : return address
   // -----------------------------------

-  __ push(r1);
-  __ stm(db_w, sp, r2.bit() | r0.bit());
+  __ Push(r1, r2, r0);

   // Perform tail call to the entry.
   ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss));
@@ -1729,8 +1769,7 @@ void StoreIC::GenerateArrayLength(MacroAssembler* masm) {
   __ BranchOnNotSmi(value, &miss);

   // Prepare tail call to StoreIC_ArrayLength.
-  __ push(receiver);
-  __ push(value);
+  __ Push(receiver, value);

   ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength));
   __ TailCallExternalReference(ref, 2, 1);
4  deps/v8/src/arm/macro-assembler-arm.cc  vendored

@@ -1101,11 +1101,11 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
 }


-void MacroAssembler::CompareObjectType(Register function,
+void MacroAssembler::CompareObjectType(Register object,
                                        Register map,
                                        Register type_reg,
                                        InstanceType type) {
-  ldr(map, FieldMemOperand(function, HeapObject::kMapOffset));
+  ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
   CompareInstanceType(map, type_reg, type);
 }

59  deps/v8/src/arm/macro-assembler-arm.h  vendored

@@ -93,6 +93,65 @@ class MacroAssembler: public Assembler {
   // well as the ip register.
   void RecordWrite(Register object, Register offset, Register scratch);

+  // Push two registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    if (src1.code() > src2.code()) {
+      stm(db_w, sp, src1.bit() | src2.bit(), cond);
+    } else {
+      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
+      str(src2, MemOperand(sp, 4, NegPreIndex), cond);
+    }
+  }
+
+  // Push three registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    ASSERT(!src2.is(src3));
+    ASSERT(!src1.is(src3));
+    if (src1.code() > src2.code()) {
+      if (src2.code() > src3.code()) {
+        stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+      } else {
+        stm(db_w, sp, src1.bit() | src2.bit(), cond);
+        str(src3, MemOperand(sp, 4, NegPreIndex), cond);
+      }
+    } else {
+      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
+      Push(src2, src3, cond);
+    }
+  }
+
+  // Push four registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2,
+            Register src3, Register src4, Condition cond = al) {
+    ASSERT(!src1.is(src2));
+    ASSERT(!src2.is(src3));
+    ASSERT(!src1.is(src3));
+    ASSERT(!src1.is(src4));
+    ASSERT(!src2.is(src4));
+    ASSERT(!src3.is(src4));
+    if (src1.code() > src2.code()) {
+      if (src2.code() > src3.code()) {
+        if (src3.code() > src4.code()) {
+          stm(db_w,
+              sp,
+              src1.bit() | src2.bit() | src3.bit() | src4.bit(),
+              cond);
+        } else {
+          stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
+          str(src4, MemOperand(sp, 4, NegPreIndex), cond);
+        }
+      } else {
+        stm(db_w, sp, src1.bit() | src2.bit(), cond);
+        Push(src3, src4, cond);
+      }
+    } else {
+      str(src1, MemOperand(sp, 4, NegPreIndex), cond);
+      Push(src2, src3, src4, cond);
+    }
+  }
+
   // ---------------------------------------------------------------------------
   // Stack limit support

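The str fallbacks in these helpers exist because ARM's stm ignores the textual order of its register list: the block store always puts the lowest-numbered register at the lowest address, so Push(src1, src2) can collapse into a single stm only when src1.code() > src2.code(). A small model of that rule in plain C++ (the __builtin_popcount intrinsic assumes GCC or Clang):

    #include <cstdint>
    #include <cstdio>

    // Model of ARM "stm db_w sp, {regs}": decrement sp by the register
    // count, then store registers in ascending register-number order at
    // ascending addresses; the order the registers were named is irrelevant.
    void stm_db_w(uint32_t* stack, int* sp, uint32_t mask,
                  const uint32_t* reg) {
      *sp -= __builtin_popcount(mask);
      int addr = *sp;
      for (int r = 0; r < 16; r++) {
        if (mask & (1u << r)) stack[addr++] = reg[r];
      }
    }

    int main() {
      uint32_t stack[8] = {0};
      uint32_t reg[16] = {0};
      reg[2] = 0x22; reg[3] = 0x33;
      int sp = 8;
      // Push(r3, r2): r3 must land at the higher address, which is exactly
      // what a single stm produces, so no str fallback is needed.
      stm_db_w(stack, &sp, (1u << 3) | (1u << 2), reg);
      printf("stack[%d]=0x%x  stack[%d]=0x%x\n",
             sp, stack[sp], sp + 1, stack[sp + 1]);  // 0x22 below 0x33
      return 0;
    }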
6  deps/v8/src/arm/simulator-arm.cc  vendored

@@ -541,7 +541,6 @@ void Simulator::FlushOnePage(intptr_t start, int size) {


 void Simulator::CheckICache(Instr* instr) {
-#ifdef DEBUG
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -560,7 +559,6 @@ void Simulator::CheckICache(Instr* instr) {
     memcpy(cached_line, line, CachePage::kLineLength);
     *cache_valid_byte = CachePage::LINE_VALID;
   }
-#endif
 }


@@ -2441,7 +2439,9 @@ void Simulator::DecodeType6CoprocessorIns(Instr* instr) {

 // Executes the current instruction.
 void Simulator::InstructionDecode(Instr* instr) {
-  CheckICache(instr);
+  if (v8::internal::FLAG_check_icache) {
+    CheckICache(instr);
+  }
   pc_modified_ = false;
   if (::v8::internal::FLAG_trace_sim) {
     disasm::NameConverter converter;
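With the #ifdef DEBUG guards gone, the simulator's i-cache consistency check is instead gated at run time by the new --check-icache flag (defined in flag-definitions.h further down). The check itself is a validate-then-compare pattern; here is a toy version in plain C++ with illustrative names and sizes:

    #include <cassert>
    #include <cstdio>
    #include <cstring>

    // Toy version of the simulator's CheckICache logic: the first execution
    // of a line copies it into a shadow cache; later executions assert the
    // code has not changed without an intervening flush.
    const int kLineLength = 32;

    struct CacheLine {
      bool valid = false;
      char shadow[kLineLength];
    };

    void check_icache(CacheLine* line, const char* code) {
      if (line->valid) {
        // Check that the data in memory matches what was cached.
        assert(memcmp(line->shadow, code, kLineLength) == 0);
      } else {
        // Cache miss: validate the line by caching the current code.
        memcpy(line->shadow, code, kLineLength);
        line->valid = true;
      }
    }

    int main() {
      char code[kLineLength] = "mov r0, #1";
      CacheLine line;
      check_icache(&line, code);  // first touch caches the line
      check_icache(&line, code);  // unchanged code passes the check
      printf("icache check passed\n");
      return 0;
    }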
45  deps/v8/src/arm/stub-cache-arm.cc  vendored

@@ -296,7 +296,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
   // We jump to a runtime call that extends the properties array.
   __ push(receiver_reg);
   __ mov(r2, Operand(Handle<Map>(transition)));
-  __ stm(db_w, sp, r2.bit() | r0.bit());
+  __ Push(r2, r0);
   __ TailCallExternalReference(
       ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage)),
       3, 1);
@@ -464,8 +464,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
     __ EnterInternalFrame();

     __ push(receiver);
-    __ push(holder);
-    __ push(name_);
+    __ Push(holder, name_);

     CompileCallLoadPropertyWithInterceptor(masm,
                                            receiver,
@@ -510,8 +509,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {

     Label cleanup;
     __ pop(scratch2);
-    __ push(receiver);
-    __ push(scratch2);
+    __ Push(receiver, scratch2);

     holder = stub_compiler->CheckPrototypes(holder_obj, holder,
                                             lookup->holder(), scratch1,
@@ -523,8 +521,7 @@ class LoadInterceptorCompiler BASE_EMBEDDED {
     __ Move(holder, Handle<AccessorInfo>(callback));
     __ push(holder);
     __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
-    __ push(scratch1);
-    __ push(name_);
+    __ Push(scratch1, name_);

     ExternalReference ref =
         ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
@@ -725,13 +722,11 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
   CheckPrototypes(object, receiver, holder, scratch1, scratch2, name, miss);

   // Push the arguments on the JS stack of the caller.
-  __ push(receiver);  // receiver
-  __ push(reg);  // holder
+  __ push(receiver);  // Receiver.
+  __ push(reg);  // Holder.
   __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback data
-  __ push(ip);
   __ ldr(reg, FieldMemOperand(ip, AccessorInfo::kDataOffset));
-  __ push(reg);
-  __ push(name_reg);  // name
+  __ Push(ip, reg, name_reg);

   // Do tail-call to the runtime system.
   ExternalReference load_callback_property =
@@ -1105,8 +1100,7 @@ Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,

   // Call the interceptor.
   __ EnterInternalFrame();
-  __ push(holder_reg);
-  __ push(name_reg);
+  __ Push(holder_reg, name_reg);
   CompileCallLoadPropertyWithInterceptor(masm(),
                                          receiver,
                                          holder_reg,
@@ -1233,7 +1227,7 @@ Object* CallStubCompiler::CompileCallGlobal(JSObject* object,
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));

   // Jump to the cached code (tail call).
-  __ IncrementCounter(&Counters::call_global_inline, 1, r1, r3);
+  __ IncrementCounter(&Counters::call_global_inline, 1, r3, r4);
   ASSERT(function->is_compiled());
   Handle<Code> code(function->code());
   ParameterCount expected(function->shared()->formal_parameter_count());
@@ -1309,7 +1303,7 @@ Object* StoreStubCompiler::CompileStoreCallback(JSObject* object,

   __ push(r1);  // receiver
   __ mov(ip, Operand(Handle<AccessorInfo>(callback)));  // callback info
-  __ stm(db_w, sp, ip.bit() | r2.bit() | r0.bit());
+  __ Push(ip, r2, r0);

   // Do tail-call to the runtime system.
   ExternalReference store_callback_property =
@@ -1354,9 +1348,7 @@ Object* StoreStubCompiler::CompileStoreInterceptor(JSObject* receiver,
   // checks.
   ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded());

-  __ push(r1);  // receiver.
-  __ push(r2);  // name.
-  __ push(r0);  // value.
+  __ Push(r1, r2, r0);  // Receiver, name, value.

   // Do tail-call to the runtime system.
   ExternalReference store_ic_property =
@@ -1559,35 +1551,34 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
   // ----------- S t a t e -------------
   //  -- r2    : name
   //  -- lr    : return address
-  //  -- [sp]  : receiver
+  //  -- r0    : receiver
+  //  -- sp[0] : receiver
   // -----------------------------------
   Label miss;

-  // Get the receiver from the stack.
-  __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
-
   // If the object is the holder then we know that it's a global
   // object which can only happen for contextual calls. In this case,
   // the receiver cannot be a smi.
   if (object != holder) {
-    __ tst(r1, Operand(kSmiTagMask));
+    __ tst(r0, Operand(kSmiTagMask));
     __ b(eq, &miss);
   }

   // Check that the map of the global has not changed.
-  CheckPrototypes(object, r1, holder, r3, r0, name, &miss);
+  CheckPrototypes(object, r0, holder, r3, r4, name, &miss);

   // Get the value from the cell.
   __ mov(r3, Operand(Handle<JSGlobalPropertyCell>(cell)));
-  __ ldr(r0, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
+  __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));

   // Check for deleted property if property can actually be deleted.
   if (!is_dont_delete) {
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-    __ cmp(r0, ip);
+    __ cmp(r4, ip);
     __ b(eq, &miss);
   }

+  __ mov(r0, r4);
   __ IncrementCounter(&Counters::named_load_global_inline, 1, r1, r3);
   __ Ret();

76  deps/v8/src/arm/virtual-frame-arm.cc  vendored

@@ -88,7 +88,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
       break;
     case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
       __ pop(r1);
-      __ pop(r1);
+      __ pop(r0);
       break;
     case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
       __ push(r0);
@@ -121,8 +121,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
      __ pop(r0);
       break;
     case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
-      __ push(r1);
-      __ push(r0);
+      __ Push(r1, r0);
       break;
     case CASE_NUMBER(R0_R1_TOS, R0_TOS):
       __ push(r1);
@@ -137,8 +136,7 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
       __ Swap(r0, r1, ip);
       break;
     case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
-      __ push(r0);
-      __ push(r1);
+      __ Push(r0, r1);
       break;
     case CASE_NUMBER(R1_R0_TOS, R0_TOS):
       __ push(r0);
@@ -270,6 +268,7 @@ void VirtualFrame::CallJSFunction(int arg_count) {


 void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
+  ASSERT(SpilledScope::is_spilled());
   Forget(arg_count);
   ASSERT(cgen()->HasValidEntryRegisters());
   __ CallRuntime(f, arg_count);
@@ -305,6 +304,18 @@ void VirtualFrame::CallLoadIC(RelocInfo::Mode mode) {
 }


+void VirtualFrame::CallKeyedLoadIC() {
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
+  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+}
+
+
+void VirtualFrame::CallKeyedStoreIC() {
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
+}
+
+
 void VirtualFrame::CallCodeObject(Handle<Code> code,
                                   RelocInfo::Mode rmode,
                                   int dropped_args) {
@@ -398,6 +409,61 @@ void VirtualFrame::EmitPop(Register reg) {
 }


+void VirtualFrame::SpillAllButCopyTOSToR0() {
+  switch (top_of_stack_state_) {
+    case NO_TOS_REGISTERS:
+      __ ldr(r0, MemOperand(sp, 0));
+      break;
+    case R0_TOS:
+      __ push(r0);
+      break;
+    case R1_TOS:
+      __ push(r1);
+      __ mov(r0, r1);
+      break;
+    case R0_R1_TOS:
+      __ Push(r1, r0);
+      break;
+    case R1_R0_TOS:
+      __ Push(r0, r1);
+      __ mov(r0, r1);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  top_of_stack_state_ = NO_TOS_REGISTERS;
+}
+
+
+void VirtualFrame::SpillAllButCopyTOSToR1R0() {
+  switch (top_of_stack_state_) {
+    case NO_TOS_REGISTERS:
+      __ ldr(r1, MemOperand(sp, 0));
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R0_TOS:
+      __ push(r0);
+      __ mov(r1, r0);
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R1_TOS:
+      __ push(r1);
+      __ ldr(r0, MemOperand(sp, kPointerSize));
+      break;
+    case R0_R1_TOS:
+      __ Push(r1, r0);
+      __ Swap(r0, r1, ip);
+      break;
+    case R1_R0_TOS:
+      __ Push(r0, r1);
+      break;
+    default:
+      UNREACHABLE();
+  }
+  top_of_stack_state_ = NO_TOS_REGISTERS;
+}
+
+
 Register VirtualFrame::Peek() {
   AssertIsNotSpilled();
   if (top_of_stack_state_ == NO_TOS_REGISTERS) {
19  deps/v8/src/arm/virtual-frame-arm.h  vendored

@@ -308,10 +308,18 @@ class VirtualFrame : public ZoneObject {
                       InvokeJSFlags flag,
                       int arg_count);

-  // Call load IC. Receiver on stack and property name in r2. Result returned in
-  // r0.
+  // Call load IC. Receiver is on the stack and the property name is in r2.
+  // Result is returned in r0.
   void CallLoadIC(RelocInfo::Mode mode);

+  // Call keyed load IC. Key and receiver are on the stack. Result is returned
+  // in r0.
+  void CallKeyedLoadIC();
+
+  // Call keyed store IC. Key and receiver are on the stack and the value is in
+  // r0. Result is returned in r0.
+  void CallKeyedStoreIC();
+
   // Call into an IC stub given the number of arguments it removes
   // from the stack. Register arguments to the IC stub are implicit,
   // and depend on the type of IC stub.
@@ -340,6 +348,13 @@ class VirtualFrame : public ZoneObject {
   // must be copied to a scratch register before modification.
   Register Peek();

+  // Flushes all registers, but it puts a copy of the top-of-stack in r0.
+  void SpillAllButCopyTOSToR0();
+
+  // Flushes all registers, but it puts a copy of the top-of-stack in r1
+  // and the next value on the stack in r0.
+  void SpillAllButCopyTOSToR1R0();
+
   // Pop and save an element from the top of the expression stack and
   // emit a corresponding pop instruction.
   void EmitPop(Register reg);
84  deps/v8/src/array.js  vendored

@@ -644,77 +644,26 @@ function ArraySort(comparefn) {
   // In-place QuickSort algorithm.
   // For short (length <= 22) arrays, insertion sort is used for efficiency.

-  var global_receiver;
-
-  function InsertionSortWithFunc(a, from, to) {
-    for (var i = from + 1; i < to; i++) {
-      var element = a[i];
-      for (var j = i - 1; j >= from; j--) {
-        var tmp = a[j];
-        var order = %_CallFunction(global_receiver, tmp, element, comparefn);
-        if (order > 0) {
-          a[j + 1] = tmp;
-        } else {
-          break;
-        }
-      }
-      a[j + 1] = element;
-    }
-  }
-
-  function QuickSortWithFunc(a, from, to) {
-    // Insertion sort is faster for short arrays.
-    if (to - from <= 22) {
-      InsertionSortWithFunc(a, from, to);
-      return;
-    }
-    var pivot_index = $floor($random() * (to - from)) + from;
-    var pivot = a[pivot_index];
-    // Issue 95: Keep the pivot element out of the comparisons to avoid
-    // infinite recursion if comparefn(pivot, pivot) != 0.
-    a[pivot_index] = a[from];
-    a[from] = pivot;
-    var low_end = from;   // Upper bound of the elements lower than pivot.
-    var high_start = to;  // Lower bound of the elements greater than pivot.
-    // From low_end to i are elements equal to pivot.
-    // From i to high_start are elements that haven't been compared yet.
-    for (var i = from + 1; i < high_start; ) {
-      var element = a[i];
-      var order = %_CallFunction(global_receiver, element, pivot, comparefn);
-      if (order < 0) {
-        a[i] = a[low_end];
-        a[low_end] = element;
-        i++;
-        low_end++;
-      } else if (order > 0) {
-        high_start--;
-        a[i] = a[high_start];
-        a[high_start] = element;
-      } else {  // order == 0
-        i++;
-      }
-    }
-    QuickSortWithFunc(a, from, low_end);
-    QuickSortWithFunc(a, high_start, to);
-  }
-
-  function Compare(x,y) {
-    if (x === y) return 0;
-    if (%_IsSmi(x) && %_IsSmi(y)) {
-      return %SmiLexicographicCompare(x, y);
-    }
-    x = ToString(x);
-    y = ToString(y);
-    if (x == y) return 0;
-    else return x < y ? -1 : 1;
-  };
+  if (!IS_FUNCTION(comparefn)) {
+    comparefn = function (x, y) {
+      if (x === y) return 0;
+      if (%_IsSmi(x) && %_IsSmi(y)) {
+        return %SmiLexicographicCompare(x, y);
+      }
+      x = ToString(x);
+      y = ToString(y);
+      if (x == y) return 0;
+      else return x < y ? -1 : 1;
+    };
+  }
+  var global_receiver = %GetGlobalReceiver();

   function InsertionSort(a, from, to) {
     for (var i = from + 1; i < to; i++) {
       var element = a[i];
       for (var j = i - 1; j >= from; j--) {
         var tmp = a[j];
-        var order = Compare(tmp, element);
+        var order = %_CallFunction(global_receiver, tmp, element, comparefn);
         if (order > 0) {
           a[j + 1] = tmp;
         } else {
@@ -743,7 +692,7 @@ function ArraySort(comparefn) {
     // From i to high_start are elements that haven't been compared yet.
     for (var i = from + 1; i < high_start; ) {
       var element = a[i];
-      var order = Compare(element, pivot);
+      var order = %_CallFunction(global_receiver, element, pivot, comparefn);
       if (order < 0) {
         a[i] = a[low_end];
         a[low_end] = element;
@@ -903,12 +852,7 @@ function ArraySort(comparefn) {
     num_non_undefined = SafeRemoveArrayHoles(this);
   }

-  if(IS_FUNCTION(comparefn)) {
-    global_receiver = %GetGlobalReceiver();
-    QuickSortWithFunc(this, 0, num_non_undefined);
-  } else {
-    QuickSort(this, 0, num_non_undefined);
-  }
+  QuickSort(this, 0, num_non_undefined);

   if (!is_array && (num_non_undefined + 1 < max_prototype_element)) {
     // For compatibility with JSC, we shadow any elements in the prototype
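The rewrite drops the duplicated *WithFunc sort path: when no comparator is supplied, a default one is synthesized up front, so a single QuickSort serves both cases. Its partition is three-way and deliberately keeps the pivot out of the comparisons (the issue 95 comment above). The following is my C++ transcription of that partition, with a plain integer comparison standing in for comparefn and without the insertion-sort cutoff for short ranges:

    #include <cstdio>
    #include <cstdlib>
    #include <vector>

    // Three-way partition as in ArraySort's QuickSort: the pivot is swapped
    // to a[from] before partitioning, so the comparator is never handed the
    // pivot on both sides (issue 95: comparefn(pivot, pivot) != 0 must not
    // cause infinite recursion).
    static void QuickSort(std::vector<int>& a, int from, int to) {
      if (to - from <= 1) return;
      int pivot_index = from + std::rand() % (to - from);
      int pivot = a[pivot_index];
      a[pivot_index] = a[from];
      a[from] = pivot;
      int low_end = from;    // upper bound of the elements lower than pivot
      int high_start = to;   // lower bound of the elements greater than pivot
      // [low_end, i) holds elements equal to pivot; [i, high_start) has not
      // been compared yet.
      for (int i = from + 1; i < high_start; ) {
        int element = a[i];
        int order = (element < pivot) ? -1 : (element > pivot ? 1 : 0);
        if (order < 0) {
          a[i] = a[low_end];
          a[low_end] = element;
          i++;
          low_end++;
        } else if (order > 0) {
          high_start--;
          a[i] = a[high_start];
          a[high_start] = element;
        } else {  // order == 0
          i++;
        }
      }
      QuickSort(a, from, low_end);
      QuickSort(a, high_start, to);
    }

    int main() {
      std::vector<int> v = {5, 3, 8, 3, 1, 9, 3};
      QuickSort(v, 0, static_cast<int>(v.size()));
      for (int x : v) std::printf("%d ", x);  // 1 3 3 3 5 8 9
      std::printf("\n");
      return 0;
    }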
31  deps/v8/src/builtins.cc  vendored

@@ -300,35 +300,6 @@ static void FillWithHoles(FixedArray* dst, int from, int to) {
 }


-static FixedArray* LeftTrimFixedArray(FixedArray* elms) {
-  // For now this trick is only applied to fixed arrays in new space.
-  // In large object space the object's start must coincide with chunk
-  // and thus the trick is just not applicable.
-  // In old space we do not use this trick to avoid dealing with
-  // remembered sets.
-  ASSERT(Heap::new_space()->Contains(elms));
-
-  STATIC_ASSERT(FixedArray::kMapOffset == 0);
-  STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
-  STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
-
-  Object** former_start = HeapObject::RawField(elms, 0);
-
-  const int len = elms->length();
-
-  // Technically in new space this write might be omitted (except for
-  // debug mode which iterates through the heap), but to play safer
-  // we still do it.
-  former_start[0] = Heap::raw_unchecked_one_pointer_filler_map();
-
-  former_start[1] = Heap::fixed_array_map();
-  former_start[2] = reinterpret_cast<Object*>(len - 1);
-
-  ASSERT_EQ(elms->address() + kPointerSize, (elms + kPointerSize)->address());
-  return elms + kPointerSize;
-}
-
-
 static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
   // For now this trick is only applied to fixed arrays in new space.
   // In large object space the object's start must coincide with chunk
@@ -527,7 +498,7 @@ BUILTIN(ArrayShift) {
   if (Heap::new_space()->Contains(elms)) {
     // As elms still in the same space they used to be (new space),
     // there is no need to update remembered set.
-    array->set_elements(LeftTrimFixedArray(elms), SKIP_WRITE_BARRIER);
+    array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
   } else {
     // Shift the elements.
     AssertNoAllocation no_gc;
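The now-parameterized LeftTrimFixedArray is what lets Array.prototype.shift avoid copying for new-space arrays: the map and length header is rewritten to_trim slots further into the object, the cut-off prefix becomes a filler so the heap stays iterable, and an interior pointer is returned. A word-level toy model in plain C++ (V8's real object layout, maps, and write barriers are elided):

    #include <cassert>
    #include <cstdio>

    // Toy model of LeftTrimFixedArray: rather than moving length-1 elements,
    // rewrite the (map, length) header to_trim words further in and cover
    // the gap with a filler marker. Plain ints stand in for tagged words.
    int main() {
      const int kHeaderSize = 2;                    // map word + length word
      int heap[2 + 6] = {/*map*/ 1, /*length*/ 6,   // FixedArray header
                         10, 20, 30, 40, 50, 60};   // elements
      int to_trim = 1;

      int* former_start = heap;
      int len = former_start[1];
      former_start[to_trim - 1] = -1;               // filler over the gap
      former_start[to_trim] = 1;                    // new map word
      former_start[to_trim + 1] = len - to_trim;    // new, shorter length

      int* trimmed = former_start + to_trim;        // interior pointer
      assert(trimmed[1] == 5);
      for (int i = 0; i < trimmed[1]; i++) {
        printf("%d ", trimmed[kHeaderSize + i]);    // prints 20 30 40 50 60
      }
      printf("\n");
      return 0;
    }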
2  deps/v8/src/codegen.cc  vendored

@@ -77,13 +77,11 @@ void CodeGenerator::ProcessDeferred() {
     }
     // Generate the code.
     Comment cmnt(masm_, code->comment());
-    code->BeforeGenerate();
     masm_->bind(code->entry_label());
     code->SaveRegisters();
     code->Generate();
     code->RestoreRegisters();
     masm_->jmp(code->exit_label());
-    code->AfterGenerate();
   }
 }

3  deps/v8/src/codegen.h  vendored

@@ -212,9 +212,6 @@ class DeferredCode: public ZoneObject {
   void SaveRegisters();
   void RestoreRegisters();

-  virtual void BeforeGenerate() { }
-  virtual void AfterGenerate() { }
-
  protected:
   MacroAssembler* masm_;

7  deps/v8/src/debug.cc  vendored

@@ -2133,6 +2133,13 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
 }


+Handle<Context> Debugger::GetDebugContext() {
+  never_unload_debugger_ = true;
+  EnterDebugger debugger;
+  return Debug::debug_context();
+}
+
+
 void Debugger::UnloadDebugger() {
   // Make sure that there are no breakpoints left.
   Debug::ClearAllBreakPoints();

3  deps/v8/src/debug.h  vendored

@@ -665,9 +665,12 @@ class Debugger {

   static void CallMessageDispatchHandler();

+  static Handle<Context> GetDebugContext();
+
   // Unload the debugger if possible. Only called when no debugger is currently
   // active.
   static void UnloadDebugger();
+  friend void ForceUnloadDebugger();  // In test-debug.cc

   inline static bool EventActive(v8::DebugEvent event) {
     ScopedLock with(debugger_access_);
1  deps/v8/src/flag-definitions.h  vendored

@@ -232,6 +232,7 @@ DEFINE_bool(optimize_ast, true, "optimize the ast")

 // simulator-arm.cc and simulator-mips.cc
 DEFINE_bool(trace_sim, false, "Trace simulator execution")
+DEFINE_bool(check_icache, false, "Check icache flushes in ARM simulator")
 DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
 DEFINE_int(sim_stack_alignment, 8,
            "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
22  deps/v8/src/ia32/codegen-ia32.cc  vendored

@@ -8340,7 +8340,7 @@ Result CodeGenerator::EmitKeyedLoad() {
     deferred->Branch(not_equal);

     // Shift the key to get the actual index value and check that
-    // it is within bounds.
+    // it is within bounds. Use unsigned comparison to handle negative keys.
     __ mov(result.reg(), key.reg());
     __ SmiUntag(result.reg());
     __ cmp(result.reg(),
@@ -8413,27 +8413,27 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
       deferred->Branch(not_zero);
     }

-    // Check that the key is a non-negative smi.
-    __ test(key.reg(), Immediate(kSmiTagMask | kSmiSignMask));
-    deferred->Branch(not_zero);
+    // Check that the key is a smi.
+    if (!key.is_smi()) {
+      __ test(key.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(not_zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(key.reg());
+    }

     // Check that the receiver is not a smi.
     __ test(receiver.reg(), Immediate(kSmiTagMask));
     deferred->Branch(zero);

     // Check that the receiver is a JSArray.
-    __ mov(tmp.reg(),
-           FieldOperand(receiver.reg(), HeapObject::kMapOffset));
-    __ movzx_b(tmp.reg(),
-               FieldOperand(tmp.reg(), Map::kInstanceTypeOffset));
-    __ cmp(tmp.reg(), JS_ARRAY_TYPE);
+    __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, tmp.reg());
     deferred->Branch(not_equal);

     // Check that the key is within bounds. Both the key and the length of
-    // the JSArray are smis.
+    // the JSArray are smis. Use unsigned comparison to handle negative keys.
     __ cmp(key.reg(),
            FieldOperand(receiver.reg(), JSArray::kLengthOffset));
-    deferred->Branch(greater_equal);
+    deferred->Branch(above_equal);

     // Get the elements array from the receiver and check that it is not a
     // dictionary.
|
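Why above_equal works here may not be obvious from the diff alone: treating the key as unsigned folds the negative-key check into the length check, so one branch covers both. A standalone C++ sketch of the idea (illustrative only, not V8 code):

    // Comparing a possibly negative index against a non-negative length with
    // an unsigned ("above_equal") comparison rejects negative and
    // out-of-range indices with a single test.
    #include <cstdint>
    #include <cstdio>

    static bool InBounds(int32_t key, int32_t length) {
      // Negative keys become very large unsigned values, so they fail the
      // same unsigned comparison that catches keys past the end.
      return static_cast<uint32_t>(key) < static_cast<uint32_t>(length);
    }

    int main() {
      printf("%d\n", InBounds(3, 10));   // 1: valid index
      printf("%d\n", InBounds(10, 10));  // 0: past the end
      printf("%d\n", InBounds(-1, 10));  // 0: negative key, same test
    }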
16 deps/v8/src/ia32/stub-cache-ia32.cc vendored
@ -1241,6 +1241,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
   // -----------------------------------
   ASSERT(check == RECEIVER_MAP_CHECK);

+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray()) {
+    return Heap::undefined_value();
+  }
+
   Label miss;

   // Get the receiver from the stack.
@ -1389,6 +1394,11 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
   // -----------------------------------
   ASSERT(check == RECEIVER_MAP_CHECK);

+  // If object is not an array, bail out to regular call.
+  if (!object->IsJSArray()) {
+    return Heap::undefined_value();
+  }
+
   Label miss, empty_array, call_builtin;

   // Get the receiver from the stack.
@ -1476,7 +1486,11 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
   if (function_info->HasCustomCallGenerator()) {
     CustomCallGenerator generator =
         ToCData<CustomCallGenerator>(function_info->function_data());
-    return generator(this, object, holder, function, name, check);
+    Object* result = generator(this, object, holder, function, name, check);
+    // undefined means bail out to regular compiler.
+    if (!result->IsUndefined()) {
+      return result;
+    }
   }

   Label miss_in_smi_check;
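The CompileCallConstant hunk establishes a small protocol: a custom call generator signals "I cannot specialize this call site" by returning undefined, and the compiler then falls through to the regular call path. A hedged sketch of that protocol with hypothetical stand-in types (nullptr playing the role of Heap::undefined_value(); none of these names are V8's):

    #include <cstdio>

    // A specializer returns nullptr to mean "bail out", mirroring how the
    // custom call generator above returns undefined when the receiver is
    // not a JSArray.
    const char* TrySpecializeArrayPush(bool receiver_is_array) {
      if (!receiver_is_array) return nullptr;  // "undefined": don't specialize
      return "specialized ArrayPush stub";
    }

    int main() {
      const char* code = TrySpecializeArrayPush(false);
      printf("%s\n", code ? code : "fall back to generic call");
    }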
19 deps/v8/src/jsregexp.cc vendored
@ -4872,17 +4872,18 @@ void Analysis::VisitAssertion(AssertionNode* that) {

     SetRelation word_relation =
         CharacterRange::WordCharacterRelation(following_chars);
-    if (word_relation.ContainedIn()) {
-      // Following character is definitely a word character.
-      type = (type == AssertionNode::AT_BOUNDARY) ?
-          AssertionNode::AFTER_NONWORD_CHARACTER :
-          AssertionNode::AFTER_WORD_CHARACTER;
-      that->set_type(type);
-    } else if (word_relation.Disjoint()) {
+    if (word_relation.Disjoint()) {
+      // Includes the case where following_chars is empty (e.g., end-of-input).
       // Following character is definitely *not* a word character.
       type = (type == AssertionNode::AT_BOUNDARY) ?
           AssertionNode::AFTER_WORD_CHARACTER :
           AssertionNode::AFTER_NONWORD_CHARACTER;
       that->set_type(type);
+    } else if (word_relation.ContainedIn()) {
+      // Following character is definitely a word character.
+      type = (type == AssertionNode::AT_BOUNDARY) ?
+          AssertionNode::AFTER_NONWORD_CHARACTER :
+          AssertionNode::AFTER_WORD_CHARACTER;
+      that->set_type(type);
     }
   }
 }
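The reordering matters because an empty following-character set (nothing can follow the assertion, as in the "\b^" pattern from the changelog) is vacuously both "contained in" and "disjoint from" the word-character class; testing Disjoint() first makes the empty case resolve to "no word character follows", which is the correct reading. A small standalone classification sketch (illustrative, not V8's SetRelation):

    #include <set>
    #include <cstdio>

    enum Relation { kDisjoint, kContainedIn, kOverlap };

    Relation Classify(const std::set<char>& following,
                      const std::set<char>& word) {
      size_t in_word = 0;
      for (char c : following) in_word += word.count(c);
      // An empty set is vacuously both disjoint and contained; checking
      // disjoint first matches the fixed ordering in the hunk above.
      if (in_word == 0) return kDisjoint;
      if (in_word == following.size()) return kContainedIn;
      return kOverlap;
    }

    int main() {
      std::set<char> word = {'a', 'b', 'c', '0', '_'};
      printf("%d\n", Classify({'a'}, word));       // kContainedIn
      printf("%d\n", Classify({',', '.'}, word));  // kDisjoint
      printf("%d\n", Classify({}, word));          // kDisjoint: end-of-input
    }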
20 deps/v8/src/liveedit.cc vendored
@ -49,7 +49,7 @@ namespace internal {
 // that helps building the chunk list.
 class Differencer {
  public:
-  explicit Differencer(Compare::Input* input)
+  explicit Differencer(Comparator::Input* input)
       : input_(input), len1_(input->getLength1()), len2_(input->getLength2()) {
     buffer_ = NewArray<int>(len1_ * len2_);
   }
@ -70,7 +70,7 @@ class Differencer {
     CompareUpToTail(0, 0);
   }

-  void SaveResult(Compare::Output* chunk_writer) {
+  void SaveResult(Comparator::Output* chunk_writer) {
     ResultWriter writer(chunk_writer);

     int pos1 = 0;
@ -112,7 +112,7 @@ class Differencer {
   }

  private:
-  Compare::Input* input_;
+  Comparator::Input* input_;
   int* buffer_;
   int len1_;
   int len2_;
@ -195,7 +195,7 @@ class Differencer {

   class ResultWriter {
    public:
-    explicit ResultWriter(Compare::Output* chunk_writer)
+    explicit ResultWriter(Comparator::Output* chunk_writer)
         : chunk_writer_(chunk_writer), pos1_(0), pos2_(0),
           pos1_begin_(-1), pos2_begin_(-1), has_open_chunk_(false) {
     }
@ -217,7 +217,7 @@ class Differencer {
     }

    private:
-    Compare::Output* chunk_writer_;
+    Comparator::Output* chunk_writer_;
     int pos1_;
     int pos2_;
     int pos1_begin_;
@ -243,8 +243,8 @@ class Differencer {
 };


-void Compare::CalculateDifference(Compare::Input* input,
-                                  Compare::Output* result_writer) {
+void Comparator::CalculateDifference(Comparator::Input* input,
+                                     Comparator::Output* result_writer) {
   Differencer differencer(input);
   differencer.Initialize();
   differencer.FillTable();
@ -312,7 +312,7 @@ class LineEndsWrapper {


 // Represents 2 strings as 2 arrays of lines.
-class LineArrayCompareInput : public Compare::Input {
+class LineArrayCompareInput : public Comparator::Input {
  public:
   LineArrayCompareInput(Handle<String> s1, Handle<String> s2,
                         LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
@ -347,7 +347,7 @@ class LineArrayCompareInput : public Compare::Input {

 // Stores compare result in JSArray. Each chunk is stored as 3 array elements:
 // (pos1_begin, pos1_end, pos2_end).
-class LineArrayCompareOutput : public Compare::Output {
+class LineArrayCompareOutput : public Comparator::Output {
  public:
   LineArrayCompareOutput(LineEndsWrapper line_ends1, LineEndsWrapper line_ends2)
       : array_(Factory::NewJSArray(10)), current_size_(0),
@ -388,7 +388,7 @@ Handle<JSArray> LiveEdit::CompareStringsLinewise(Handle<String> s1,
   LineArrayCompareInput input(s1, s2, line_ends1, line_ends2);
   LineArrayCompareOutput output(line_ends1, line_ends2);

-  Compare::CalculateDifference(&input, &output);
+  Comparator::CalculateDifference(&input, &output);

   return output.GetResult();
 }
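For readers unfamiliar with this Differencer: it fills a len1 x len2 table over the two line arrays, from which the change chunks are read off. A compact sketch of that style of dynamic program (insert/delete edit distance over lines; a simplified model, not the V8 class):

    #include <string>
    #include <vector>
    #include <algorithm>
    #include <cstdio>

    int EditDistance(const std::vector<std::string>& a,
                     const std::vector<std::string>& b) {
      size_t n = a.size(), m = b.size();
      // table[i][j] = cost of transforming a[i..] into b[j..]
      std::vector<std::vector<int>> table(n + 1, std::vector<int>(m + 1, 0));
      for (size_t i = 0; i <= n; ++i) table[i][m] = static_cast<int>(n - i);
      for (size_t j = 0; j <= m; ++j) table[n][j] = static_cast<int>(m - j);
      for (size_t i = n; i-- > 0;) {
        for (size_t j = m; j-- > 0;) {
          if (a[i] == b[j]) {
            table[i][j] = table[i + 1][j + 1];           // lines match: free
          } else {
            table[i][j] = 1 + std::min(table[i + 1][j],  // delete a[i]
                                       table[i][j + 1]); // insert b[j]
          }
        }
      }
      return table[0][0];
    }

    int main() {
      // "b" -> "x" costs one delete plus one insert.
      printf("%d\n", EditDistance({"a", "b", "c"}, {"a", "x", "c"}));  // 2
    }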
2 deps/v8/src/liveedit.h vendored
@ -118,7 +118,7 @@ class LiveEdit : AllStatic {


 // A general-purpose comparator between 2 arrays.
-class Compare {
+class Comparator {
  public:

   // Holds 2 arrays of some elements allowing to compare any pair of
4 deps/v8/src/mark-compact.cc vendored
@ -1211,8 +1211,8 @@ static void SweepNewSpace(NewSpace* space) {
       size = object->Size();
       survivors_size += size;

-      if (Heap::ShouldBePromoted(current, size) &&
-          TryPromoteObject(object, size)) {
+      // Aggressively promote young survivors to the old space.
+      if (TryPromoteObject(object, size)) {
         continue;
       }
4 deps/v8/src/mips/assembler-mips.h vendored
@ -522,7 +522,9 @@ class Assembler : public Malloced {

   int32_t pc_offset() const { return pc_ - buffer_; }
   int32_t current_position() const { return current_position_; }
-  int32_t current_statement_position() const { return current_position_; }
+  int32_t current_statement_position() const {
+    return current_statement_position_;
+  }

   // Check if there is less than kGap bytes available in the buffer.
   // If this is the case, we need to grow the buffer before emitting
4 deps/v8/src/regexp-macro-assembler-tracer.cc vendored
@ -37,8 +37,8 @@ RegExpMacroAssemblerTracer::RegExpMacroAssemblerTracer(
     RegExpMacroAssembler* assembler) :
   assembler_(assembler) {
   unsigned int type = assembler->Implementation();
-  ASSERT(type < 3);
-  const char* impl_names[3] = {"IA32", "ARM", "Bytecode"};
+  ASSERT(type < 4);
+  const char* impl_names[4] = {"IA32", "ARM", "X64", "Bytecode"};
   PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
 }
19 deps/v8/src/runtime.cc vendored
@ -3162,7 +3162,7 @@ static bool SearchStringMultiple(Vector<schar> subject,
   StringSearchStrategy strategy =
       InitializeStringSearch(pattern_string, is_ascii);
   switch (strategy) {
-    case SEARCH_FAIL: return false;
+    case SEARCH_FAIL: break;
     case SEARCH_SHORT:
       while (pos <= max_search_start) {
         if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
@ -3189,16 +3189,17 @@ static bool SearchStringMultiple(Vector<schar> subject,
     case SEARCH_LONG:
       while (pos <= max_search_start) {
         if (!builder->HasCapacity(kMaxBuilderEntriesPerRegExpMatch)) {
           *match_pos = pos;
           return false;
         }
-        int new_pos = ComplexIndexOf(subject,
-                                     pattern_string,
-                                     pos + pattern_length);
+        int match_end = pos + pattern_length;
+        int new_pos = ComplexIndexOf(subject, pattern_string, match_end);
         if (new_pos >= 0) {
           // A match has been found.
-          if (new_pos > pos) {
-            ReplacementStringBuilder::AddSubjectSlice(builder, pos, new_pos);
+          if (new_pos > match_end) {
+            ReplacementStringBuilder::AddSubjectSlice(builder,
                                                      match_end,
                                                      new_pos);
           }
           pos = new_pos;
           builder->Add(pattern);
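The bug this hunk fixes (string search-and-replace with very simple patterns, per the changelog) is easiest to see in a scalar model: the slice of untouched text emitted between two matches must start at the end of the previous match, not at its start, or the matched text is copied back into the output. A simplified sketch, with std::string standing in for the builder machinery:

    #include <string>
    #include <cstdio>

    std::string ReplaceAll(const std::string& subject,
                           const std::string& pattern,
                           const std::string& replacement) {
      std::string out;
      size_t pos = 0;
      while (true) {
        size_t hit = subject.find(pattern, pos);
        if (hit == std::string::npos) break;
        out.append(subject, pos, hit - pos);  // slice between matches
        out.append(replacement);
        pos = hit + pattern.size();           // resume after the match end
      }
      out.append(subject, pos, std::string::npos);
      return out;
    }

    int main() {
      printf("%s\n", ReplaceAll("a-a-a", "a", "X").c_str());  // X-X-X
    }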
4 deps/v8/src/version.cc vendored
@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@ -34,7 +34,7 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     2
-#define BUILD_NUMBER      5
+#define BUILD_NUMBER      6
 #define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
362 deps/v8/src/x64/codegen-x64.cc vendored
@ -202,11 +202,21 @@ class FloatingPointHelper : public AllStatic {
   // Code pattern for loading a floating point value. Input value must
   // be either a smi or a heap number object (fp value). Requirements:
   // operand in src register. Returns operand as floating point number
-  // in XMM register
+  // in XMM register. May destroy src register.
   static void LoadFloatOperand(MacroAssembler* masm,
                                Register src,
                                XMMRegister dst);

+  // Code pattern for loading a possible number into a XMM register.
+  // If the contents of src is not a number, control branches to
+  // the Label not_number. If contents of src is a smi or a heap number
+  // object (fp value), it is loaded into the XMM register as a double.
+  // The register src is not changed, and src may not be kScratchRegister.
+  static void LoadFloatOperand(MacroAssembler* masm,
+                               Register src,
+                               XMMRegister dst,
+                               Label *not_number);
+
   // Code pattern for loading floating point values. Input values must
   // be either smi or heap number objects (fp values). Requirements:
   // operand_1 in rdx, operand_2 in rax; Returns operands as
@ -5320,6 +5330,22 @@ static bool CouldBeNaN(const Result& result) {
 }


+// Convert from signed to unsigned comparison to match the way EFLAGS are set
+// by FPU and XMM compare instructions.
+static Condition DoubleCondition(Condition cc) {
+  switch (cc) {
+    case less: return below;
+    case equal: return equal;
+    case less_equal: return below_equal;
+    case greater: return above;
+    case greater_equal: return above_equal;
+    default: UNREACHABLE();
+  }
+  UNREACHABLE();
+  return equal;
+}
+
+
 void CodeGenerator::Comparison(AstNode* node,
                                Condition cc,
                                bool strict,
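A note on DoubleCondition(): comisd and the FPU report ordering through CF/ZF the way an unsigned integer cmp does, so consuming those flags with signed jump conditions (jl/jg) would misread them. The integer analogue below shows how the same bits order differently under signed versus unsigned rules (an illustrative C++ sketch, not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t bits = 0x80000000u;  // high bit set
      int32_t as_signed = static_cast<int32_t>(bits);
      printf("signed:   %d\n", as_signed < 1);  // 1: negative, so less than 1
      printf("unsigned: %d\n", bits < 1u);      // 0: huge value, not below 1
      // Hence "less" must become "below" when the flags came from comisd.
    }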
@ -5391,7 +5417,7 @@ void CodeGenerator::Comparison(AstNode* node,
     left_side = right_side;
     right_side = temp;
     cc = ReverseCondition(cc);
-    // This may reintroduce greater or less_equal as the value of cc.
+    // This may re-introduce greater or less_equal as the value of cc.
     // CompareStub and the inline code both support all values of cc.
   }
   // Implement comparison against a constant Smi, inlining the case
@ -5434,22 +5460,13 @@ void CodeGenerator::Comparison(AstNode* node,
         // Jump to builtin for NaN.
         not_number.Branch(parity_even, &left_side);
         left_side.Unuse();
-        Condition double_cc = cc;
-        switch (cc) {
-          case less: double_cc = below; break;
-          case equal: double_cc = equal; break;
-          case less_equal: double_cc = below_equal; break;
-          case greater: double_cc = above; break;
-          case greater_equal: double_cc = above_equal; break;
-          default: UNREACHABLE();
-        }
-        dest->true_target()->Branch(double_cc);
+        dest->true_target()->Branch(DoubleCondition(cc));
         dest->false_target()->Jump();
         not_number.Bind(&left_side);
       }

       // Setup and call the compare stub.
-      CompareStub stub(cc, strict);
+      CompareStub stub(cc, strict, kCantBothBeNaN);
       Result result = frame_->CallStub(&stub, &left_side, &right_side);
       result.ToRegister();
       __ testq(result.reg(), result.reg());
@ -5642,17 +5659,34 @@ void CodeGenerator::Comparison(AstNode* node,
     // If either side is a non-smi constant, skip the smi check.
     bool known_non_smi =
         (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
-        (right_side.is_constant() && !right_side.handle()->IsSmi());
+        (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
+        left_side.type_info().IsDouble() ||
+        right_side.type_info().IsDouble();

     NaNInformation nan_info =
         (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
         kBothCouldBeNaN :
         kCantBothBeNaN;

+    // Inline number comparison handling any combination of smi's and heap
+    // numbers if:
+    //   code is in a loop
+    //   the compare operation is different from equal
+    //   compare is not a for-loop comparison
+    // The reason for excluding equal is that it will most likely be done
+    // with smi's (not heap numbers) and the code to comparing smi's is inlined
+    // separately. The same reason applies for for-loop comparison which will
+    // also most likely be smi comparisons.
+    bool is_loop_condition = (node->AsExpression() != NULL)
+        && node->AsExpression()->is_loop_condition();
+    bool inline_number_compare =
+        loop_nesting() > 0 && cc != equal && !is_loop_condition;
+
     left_side.ToRegister();
     right_side.ToRegister();

     if (known_non_smi) {
+      // Inlined equality check:
       // If at least one of the objects is not NaN, then if the objects
       // are identical, they are equal.
       if (nan_info == kCantBothBeNaN && cc == equal) {
@ -5660,8 +5694,15 @@ void CodeGenerator::Comparison(AstNode* node,
         dest->true_target()->Branch(equal);
       }

-      // When non-smi, call out to the compare stub.
-      CompareStub stub(cc, strict);
+      // Inlined number comparison:
+      if (inline_number_compare) {
+        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+      }
+
+      // Call the compare stub.
+      // TODO(whesse@chromium.org): Enable the inlining flag once
+      // GenerateInlineNumberComparison is implemented.
+      CompareStub stub(cc, strict, nan_info, true || !inline_number_compare);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
       // The result is a Smi, which is negative, zero, or positive.
       __ SmiTest(answer.reg());  // Sets both zero and sign flag.
@ -5679,15 +5720,23 @@ void CodeGenerator::Comparison(AstNode* node,

       Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
       is_smi.Branch(both_smi);
-      // When non-smi, call out to the compare stub, after inlined checks.
-      // If at least one of the objects is not NaN, then if the objects
-      // are identical, they are equal.
+
+      // Inline the equality check if both operands can't be a NaN. If both
+      // objects are the same they are equal.
       if (nan_info == kCantBothBeNaN && cc == equal) {
         __ cmpq(left_side.reg(), right_side.reg());
         dest->true_target()->Branch(equal);
       }

-      CompareStub stub(cc, strict);
+      // Inlined number comparison:
+      if (inline_number_compare) {
+        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+      }
+
+      // Call the compare stub.
+      // TODO(whesse@chromium.org): Enable the inlining flag once
+      // GenerateInlineNumberComparison is implemented.
+      CompareStub stub(cc, strict, nan_info, true || !inline_number_compare);
       Result answer = frame_->CallStub(&stub, &left_side, &right_side);
       __ SmiTest(answer.reg());  // Sets both zero and sign flags.
       answer.Unuse();
@ -5706,6 +5755,17 @@ void CodeGenerator::Comparison(AstNode* node,
 }


+void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
+                                                   Result* right_side,
+                                                   Condition cc,
+                                                   ControlDestination* dest) {
+  ASSERT(left_side->is_register());
+  ASSERT(right_side->is_register());
+  // TODO(whesse@chromium.org): Implement this function, and enable the
+  // corresponding flags in the CompareStub.
+}
+
+
 class DeferredInlineBinaryOperation: public DeferredCode {
  public:
   DeferredInlineBinaryOperation(Token::Value op,
@ -7710,6 +7770,18 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
 }


+void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+                                                        Register hash,
+                                                        Register mask) {
+  __ and_(hash, mask);
+  // Each entry in string cache consists of two pointer sized fields,
+  // but times_twice_pointer_size (multiplication by 16) scale factor
+  // is not supported by addrmode on x64 platform.
+  // So we have to premultiply entry index before lookup.
+  __ shl(hash, Immediate(kPointerSizeLog2 + 1));
+}
+
+
 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                          Register object,
                                                          Register result,
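The premultiplication is pure address arithmetic: each cache entry is two pointer-sized fields (16 bytes), but x64 addressing modes only scale an index by up to 8, so the entry index is shifted into a byte offset up front and then used with scale factor 1. A tiny sketch (kPointerSizeLog2 assumed to be 3 for 8-byte pointers):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kPointerSizeLog2 = 3;  // 8-byte pointers on x64
      uint32_t index = 5;              // masked hash = entry index
      // shl hash, kPointerSizeLog2 + 1  ==  index * 16
      uint32_t byte_offset = index << (kPointerSizeLog2 + 1);
      printf("entry %u starts at byte offset %u\n", index, byte_offset);  // 80
    }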
@ -7717,12 +7789,6 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                          Register scratch2,
                                                          bool object_is_smi,
                                                          Label* not_found) {
-  // Currently only lookup for smis. Check for smi if object is not known to be
-  // a smi.
-  if (!object_is_smi) {
-    __ JumpIfNotSmi(object, not_found);
-  }
-
   // Use of registers. Register result is used as a temporary.
   Register number_string_cache = result;
   Register mask = scratch1;
@ -7738,28 +7804,57 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
   __ subl(mask, Immediate(1));  // Make mask.

   // Calculate the entry in the number string cache. The hash value in the
-  // number string cache for smis is just the smi value.
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Label is_smi;
+  Label load_result_from_cache;
+  if (!object_is_smi) {
+    __ JumpIfSmi(object, &is_smi);
+    __ CheckMap(object, Factory::heap_number_map(), not_found, true);
+
+    ASSERT_EQ(8, kDoubleSize);
+    __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
+    __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
+    GenerateConvertHashCodeToIndex(masm, scratch, mask);
+
+    Register index = scratch;
+    Register probe = mask;
+    __ movq(probe,
+            FieldOperand(number_string_cache,
+                         index,
+                         times_1,
+                         FixedArray::kHeaderSize));
+    __ JumpIfSmi(probe, not_found);
+    ASSERT(CpuFeatures::IsSupported(SSE2));
+    CpuFeatures::Scope fscope(SSE2);
+    __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
+    __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
+    __ comisd(xmm0, xmm1);
+    __ j(parity_even, not_found);  // Bail out if NaN is involved.
+    __ j(not_equal, not_found);  // The cache did not contain this value.
+    __ jmp(&load_result_from_cache);
+  }
+
+  __ bind(&is_smi);
   __ movq(scratch, object);
   __ SmiToInteger32(scratch, scratch);
-  __ andl(scratch, mask);
+  GenerateConvertHashCodeToIndex(masm, scratch, mask);

-  // Each entry in string cache consists of two pointer sized fields,
-  // but times_twice_pointer_size (multiplication by 16) scale factor
-  // is not supported by addrmode on x64 platform.
-  // So we have to premultiply entry index before lookup
-  __ shl(scratch, Immediate(kPointerSizeLog2 + 1));
+  Register index = scratch;
   // Check if the entry is the smi we are looking for.
   __ cmpq(object,
           FieldOperand(number_string_cache,
-                       scratch,
+                       index,
                        times_1,
                        FixedArray::kHeaderSize));
   __ j(not_equal, not_found);

   // Get the result from the cache.
+  __ bind(&load_result_from_cache);
   __ movq(result,
           FieldOperand(number_string_cache,
-                       scratch,
+                       index,
                        times_1,
                        FixedArray::kHeaderSize + kPointerSize));
   __ IncrementCounter(&Counters::number_to_string_native, 1);
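The double hash added above xors the value's two 32-bit halves and masks the result down to an entry index (the movl/xor_ pair, mirroring what the comment attributes to Heap::GetNumberStringCache). A standalone model of that hash, using memcpy for the bit reinterpretation; the mask is assumed to be cache_size - 1 with cache_size a power of two:

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    uint32_t DoubleHash(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // reinterpret the 8 bytes
      uint32_t low = static_cast<uint32_t>(bits);
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      return (low ^ high) & mask;  // xor of upper and lower words, masked
    }

    int main() {
      printf("%u\n", DoubleHash(2.5, 63));
      printf("%u\n", DoubleHash(-0.0, 63));
    }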
@ -7777,64 +7872,94 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {

   __ bind(&runtime);
   // Handle number to string in the runtime system if not found in the cache.
-  __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
+  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
 }


+static int NegativeComparisonResult(Condition cc) {
+  ASSERT(cc != equal);
+  ASSERT((cc == less) || (cc == less_equal)
+      || (cc == greater) || (cc == greater_equal));
+  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
+}
+
 void CompareStub::Generate(MacroAssembler* masm) {
   Label call_builtin, done;

   // NOTICE! This code is only reached after a smi-fast-case check, so
   // it is certain that at least one operand isn't a smi.

-  if (cc_ == equal) {  // Both strict and non-strict.
-    Label slow;  // Fallthrough label.
-    // Equality is almost reflexive (everything but NaN), so start by testing
-    // for "identity and not NaN".
-    {
-      Label not_identical;
-      __ cmpq(rax, rdx);
-      __ j(not_equal, &not_identical);
-      // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
-      // so we do the second best thing - test it ourselves.
-
-      if (never_nan_nan_) {
-        __ xor_(rax, rax);
-        __ ret(0);
-      } else {
-        Label return_equal;
-        Label heap_number;
-        // If it's not a heap number, then return equal.
-        __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
-               Factory::heap_number_map());
-        __ j(equal, &heap_number);
-        __ bind(&return_equal);
-        __ xor_(rax, rax);
-        __ ret(0);
-
-        __ bind(&heap_number);
-        // It is a heap number, so return non-equal if it's NaN and equal if
-        // it's not NaN.
-        // The representation of NaN values has all exponent bits (52..62) set,
-        // and not all mantissa bits (0..51) clear.
-        // We only allow QNaNs, which have bit 51 set (which also rules out
-        // the value being Infinity).
-
-        // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
-        // all bits in the mask are set. We only need to check the word
-        // that contains the exponent and high bit of the mantissa.
-        ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
-        __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
-        __ xorl(rax, rax);
-        __ addl(rdx, rdx);  // Shift value and mask so mask applies to top bits.
-        __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
-        __ setcc(above_equal, rax);
-        __ ret(0);
-      }
-
-      __ bind(&not_identical);
-    }
+  // Identical objects can be compared fast, but there are some tricky cases
+  // for NaN and undefined.
+  {
+    Label not_identical;
+    __ cmpq(rax, rdx);
+    __ j(not_equal, &not_identical);
+
+    if (cc_ != equal) {
+      // Check for undefined. undefined OP undefined is false even though
+      // undefined == undefined.
+      Label check_for_nan;
+      __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
+      __ j(not_equal, &check_for_nan);
+      __ Set(rax, NegativeComparisonResult(cc_));
+      __ ret(0);
+      __ bind(&check_for_nan);
+    }
+
+    // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+    // so we do the second best thing - test it ourselves.
+    // Note: if cc_ != equal, never_nan_nan_ is not used.
+    if (never_nan_nan_ && (cc_ == equal)) {
+      __ Set(rax, EQUAL);
+      __ ret(0);
+    } else {
+      Label return_equal;
+      Label heap_number;
+      // If it's not a heap number, then return equal.
+      __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+             Factory::heap_number_map());
+      __ j(equal, &heap_number);
+      __ bind(&return_equal);
+      __ Set(rax, EQUAL);
+      __ ret(0);
+
+      __ bind(&heap_number);
+      // It is a heap number, so return non-equal if it's NaN and equal if
+      // it's not NaN.
+      // The representation of NaN values has all exponent bits (52..62) set,
+      // and not all mantissa bits (0..51) clear.
+      // We only allow QNaNs, which have bit 51 set (which also rules out
+      // the value being Infinity).
+
+      // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+      // all bits in the mask are set. We only need to check the word
+      // that contains the exponent and high bit of the mantissa.
+      ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+      __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+      __ xorl(rax, rax);
+      __ addl(rdx, rdx);  // Shift value and mask so mask applies to top bits.
+      __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
+      if (cc_ == equal) {
+        __ setcc(above_equal, rax);
+        __ ret(0);
+      } else {
+        Label nan;
+        __ j(above_equal, &nan);
+        __ Set(rax, EQUAL);
+        __ ret(0);
+        __ bind(&nan);
+        __ Set(rax, NegativeComparisonResult(cc_));
+        __ ret(0);
+      }
+    }
+
+    __ bind(&not_identical);
+  }
+
+  if (cc_ == equal) {  // Both strict and non-strict.
+    Label slow;  // Fallthrough label.

     // If we're doing a strict equality comparison, we don't have to do
     // type conversion, so we generate code to do fast comparison for objects
     // and oddballs. Non-smi numbers and strings still go through the usual
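The QNaN test deserves a gloss: after addl shifts the sign bit out of the high word, any value at or above the shifted quiet-NaN mask has all exponent bits plus mantissa bit 51 set, i.e. is a quiet NaN. A runnable C++ model of that check (the kQuietNaNHighBitsMask value is assumed to match V8's 0xfff << (51 - 32)):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>
    #include <cmath>

    bool IsQuietNaNHighWord(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      const uint32_t kQuietNaNHighBitsMask = 0xfffu << (51 - 32);  // assumed
      // addl rdx, rdx in the stub: shift the sign bit out, compare top bits.
      return (high << 1) >= (kQuietNaNHighBitsMask << 1);
    }

    int main() {
      printf("%d\n", IsQuietNaNHighWord(std::nan("")));  // 1
      printf("%d\n", IsQuietNaNHighWord(1.5));           // 0
      printf("%d\n", IsQuietNaNHighWord(-1.5));          // 0: sign shifted out
    }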
@ -7896,36 +8021,43 @@ void CompareStub::Generate(MacroAssembler* masm) {
   __ push(rdx);
   __ push(rcx);

-  // Inlined floating point compare.
-  // Call builtin if operands are not floating point or smi.
-  Label check_for_symbols;
-  // Push arguments on stack, for helper functions.
-  FloatingPointHelper::CheckNumberOperands(masm, &check_for_symbols);
-  FloatingPointHelper::LoadFloatOperands(masm, rax, rdx);
-  __ FCmp();
+  // Generate the number comparison code.
+  if (include_number_compare_) {
+    Label non_number_comparison;
+    Label unordered;
+    FloatingPointHelper::LoadFloatOperand(masm, rdx, xmm0,
+                                          &non_number_comparison);
+    FloatingPointHelper::LoadFloatOperand(masm, rax, xmm1,
+                                          &non_number_comparison);

-  // Jump to builtin for NaN.
-  __ j(parity_even, &call_builtin);
+    __ comisd(xmm0, xmm1);

-  // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
-  Label below_lbl, above_lbl;
-  // use rdx, rax to convert unsigned to signed comparison
-  __ j(below, &below_lbl);
-  __ j(above, &above_lbl);
+    // Don't base result on EFLAGS when a NaN is involved.
+    __ j(parity_even, &unordered);
+    // Return a result of -1, 0, or 1, based on EFLAGS.
+    __ movq(rax, Immediate(0));  // equal
+    __ movq(rcx, Immediate(1));
+    __ cmovq(above, rax, rcx);
+    __ movq(rcx, Immediate(-1));
+    __ cmovq(below, rax, rcx);
+    __ ret(2 * kPointerSize);  // rax, rdx were pushed

-  __ xor_(rax, rax);  // equal
-  __ ret(2 * kPointerSize);
+    // If one of the numbers was NaN, then the result is always false.
+    // The cc is never not-equal.
+    __ bind(&unordered);
+    ASSERT(cc_ != not_equal);
+    if (cc_ == less || cc_ == less_equal) {
+      __ Set(rax, 1);
+    } else {
+      __ Set(rax, -1);
+    }
+    __ ret(2 * kPointerSize);  // rax, rdx were pushed

-  __ bind(&below_lbl);
-  __ movq(rax, Immediate(-1));
-  __ ret(2 * kPointerSize);
-
-  __ bind(&above_lbl);
-  __ movq(rax, Immediate(1));
-  __ ret(2 * kPointerSize);  // rax, rdx were pushed
+    // The number comparison code did not provide a valid result.
+    __ bind(&non_number_comparison);
+  }

   // Fast negative check for symbol-to-symbol equality.
-  __ bind(&check_for_symbols);
   Label check_for_strings;
   if (cc_ == equal) {
     BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
@ -7968,14 +8100,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
     builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
   } else {
     builtin = Builtins::COMPARE;
-    int ncr;  // NaN compare result
-    if (cc_ == less || cc_ == less_equal) {
-      ncr = GREATER;
-    } else {
-      ASSERT(cc_ == greater || cc_ == greater_equal);  // remaining cases
-      ncr = LESS;
-    }
-    __ Push(Smi::FromInt(ncr));
+    __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
   }

   // Restore return address on the stack.
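The unordered branch encodes a JavaScript/IEEE rule: every ordered comparison involving NaN is false. The stub's caller interprets the result like strcmp (negative, zero, positive), so handing back a value with the wrong sign for the requested comparison guarantees the subsequent test fails. A short illustration:

    #include <cmath>
    #include <cstdio>

    int main() {
      double nan = std::nan("");
      // All ordered comparisons involving NaN are false:
      printf("%d %d %d %d\n", nan < 1.0, nan <= 1.0, nan > 1.0, nan >= 1.0);
      // So for cc == less the stub returns +1 ("greater"), which makes the
      // caller's "result < 0" test read as false, and vice versa for greater.
      int stub_result_for_less = 1;
      printf("%d\n", stub_result_for_less < 0);  // 0: comparison is false
    }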
@ -8764,6 +8889,27 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
 }


+void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
+                                           Register src,
+                                           XMMRegister dst,
+                                           Label* not_number) {
+  Label load_smi, done;
+  ASSERT(!src.is(kScratchRegister));
+  __ JumpIfSmi(src, &load_smi);
+  __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+  __ cmpq(FieldOperand(src, HeapObject::kMapOffset), kScratchRegister);
+  __ j(not_equal, not_number);
+  __ movsd(dst, FieldOperand(src, HeapNumber::kValueOffset));
+  __ jmp(&done);
+
+  __ bind(&load_smi);
+  __ SmiToInteger32(kScratchRegister, src);
+  __ cvtlsi2sd(dst, kScratchRegister);
+
+  __ bind(&done);
+}
+
+
 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
                                             XMMRegister dst1,
                                             XMMRegister dst2) {
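LoadFloatOperand dispatches on the smi tag: smis are untagged and converted with cvtlsi2sd, while heap numbers have their payload loaded with movsd. A deliberately simplified model of that dispatch (1-bit tag and a fake "heap" payload; not V8's real tagging scheme):

    #include <cstdint>
    #include <cstdio>

    double LoadNumber(intptr_t tagged, const double* heap_payload) {
      if ((tagged & 1) == 0) {
        // Smi: the integer value lives above the tag bit; untag and convert.
        return static_cast<double>(tagged >> 1);
      }
      // Heap object: in this sketch the payload stands in for
      // movsd dst, [object + HeapNumber::kValueOffset].
      return *heap_payload;
    }

    int main() {
      double heap_number = 2.5;
      printf("%f\n", LoadNumber(42 << 1, &heap_number));  // 42.0 from a smi
      printf("%f\n", LoadNumber(1, &heap_number));        // 2.5 from "heap"
    }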
8 deps/v8/src/x64/codegen-x64.h vendored
@ -488,6 +488,10 @@ class CodeGenerator: public AstVisitor {
                   Condition cc,
                   bool strict,
                   ControlDestination* destination);
+  void GenerateInlineNumberComparison(Result* left_side,
+                                      Result* right_side,
+                                      Condition cc,
+                                      ControlDestination* dest);

   // To prevent long attacker-controlled byte sequences, integer constants
   // from the JavaScript source are loaded in two parts if they are larger
@ -939,6 +943,10 @@ class NumberToStringStub: public CodeStub {
                                             Label* not_found);

  private:
+  static void GenerateConvertHashCodeToIndex(MacroAssembler* masm,
+                                             Register hash,
+                                             Register mask);
+
   Major MajorKey() { return NumberToString; }
   int MinorKey() { return 0; }
12 deps/v8/test/cctest/test-api.cc vendored
@ -569,6 +569,7 @@ THREADED_TEST(UsingExternalAsciiString) {

 THREADED_TEST(ScavengeExternalString) {
   TestResource::dispose_count = 0;
+  bool in_new_space = false;
   {
     v8::HandleScope scope;
     uint16_t* two_byte_string = AsciiToTwoByteString("test string");
@ -576,16 +577,18 @@ THREADED_TEST(ScavengeExternalString) {
         String::NewExternal(new TestResource(two_byte_string));
     i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
     i::Heap::CollectGarbage(0, i::NEW_SPACE);
-    CHECK(i::Heap::InNewSpace(*istring));
+    in_new_space = i::Heap::InNewSpace(*istring);
+    CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring));
     CHECK_EQ(0, TestResource::dispose_count);
   }
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);
+  i::Heap::CollectGarbage(0, in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
   CHECK_EQ(1, TestResource::dispose_count);
 }


 THREADED_TEST(ScavengeExternalAsciiString) {
   TestAsciiResource::dispose_count = 0;
+  bool in_new_space = false;
   {
     v8::HandleScope scope;
     const char* one_byte_string = "test string";
@ -593,10 +596,11 @@ THREADED_TEST(ScavengeExternalAsciiString) {
         new TestAsciiResource(i::StrDup(one_byte_string)));
     i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
     i::Heap::CollectGarbage(0, i::NEW_SPACE);
-    CHECK(i::Heap::InNewSpace(*istring));
+    in_new_space = i::Heap::InNewSpace(*istring);
+    CHECK(in_new_space || i::Heap::old_data_space()->Contains(*istring));
     CHECK_EQ(0, TestAsciiResource::dispose_count);
   }
-  i::Heap::CollectGarbage(0, i::NEW_SPACE);
+  i::Heap::CollectGarbage(0, in_new_space ? i::NEW_SPACE : i::OLD_DATA_SPACE);
   CHECK_EQ(1, TestAsciiResource::dispose_count);
 }
8 deps/v8/test/cctest/test-debug.cc vendored
@ -436,6 +436,12 @@ void CheckDebuggerUnloaded(bool check_functions) {
 }


+void ForceUnloadDebugger() {
+  Debugger::never_unload_debugger_ = false;
+  Debugger::UnloadDebugger();
+}
+
+
 } }  // namespace v8::internal


@ -6139,3 +6145,5 @@ TEST(CallingContextIsNotDebugContext) {
   debugger_context = v8::Handle<v8::Context>();
   CheckDebuggerUnloaded();
 }
+
+
6 deps/v8/test/cctest/test-liveedit.cc vendored
@ -38,7 +38,7 @@ using namespace v8::internal;
 // Anonymous namespace.
 namespace {

-class StringCompareInput : public Compare::Input {
+class StringCompareInput : public Comparator::Input {
  public:
   StringCompareInput(const char* s1, const char* s2) : s1_(s1), s2_(s2) {
   }
@ -72,7 +72,7 @@ class DiffChunkStruct : public ZoneObject {
 };


-class ListDiffOutputWriter : public Compare::Output {
+class ListDiffOutputWriter : public Comparator::Output {
  public:
   explicit ListDiffOutputWriter(DiffChunkStruct** next_chunk_pointer)
       : next_chunk_pointer_(next_chunk_pointer) {
@ -98,7 +98,7 @@ void CompareStringsOneWay(const char* s1, const char* s2,
   DiffChunkStruct* first_chunk;
   ListDiffOutputWriter writer(&first_chunk);

-  Compare::CalculateDifference(&input, &writer);
+  Comparator::CalculateDifference(&input, &writer);

   int len1 = StrLength(s1);
   int len2 = StrLength(s2);
1 deps/v8/test/es5conform/es5conform.status vendored
@ -38,7 +38,6 @@ chapter13: UNIMPLEMENTED
 chapter14: UNIMPLEMENTED
 chapter15/15.1: UNIMPLEMENTED
 chapter15/15.2/15.2.3/15.2.3.1: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.5: UNIMPLEMENTED
 chapter15/15.2/15.2.3/15.2.3.8: UNIMPLEMENTED
 chapter15/15.2/15.2.3/15.2.3.9: UNIMPLEMENTED
 chapter15/15.2/15.2.3/15.2.3.10: UNIMPLEMENTED
11 deps/v8/test/mjsunit/array-pop.js vendored
@ -59,3 +59,14 @@
     assertEquals(0, a.length, "length 9th pop");
   }
 })();
+
+// Test the case of not JSArray receiver.
+// Regression test for custom call generators, see issue 684.
+(function() {
+  var a = [];
+  for (var i = 0; i < 100; i++) a.push(i);
+  var x = {__proto__: a};
+  for (var i = 0; i < 100; i++) {
+    assertEquals(99 - i, x.pop(), i + 'th iteration');
+  }
+})();
10 deps/v8/test/mjsunit/array-push.js vendored
@ -103,3 +103,13 @@
     assertEquals(29, a.push(29));
   }
 })();
+
+// Test the case of not JSArray receiver.
+// Regression test for custom call generators, see issue 684.
+(function() {
+  var x = {__proto__: []};
+  for (var i = 0; i < 100; i++) {
+    x.push("a");
+    assertEquals(i + 1, x.length, i + 'th iteration');
+  }
+})();
4 deps/v8/test/mjsunit/binary-op-newspace.js vendored
@ -30,14 +30,14 @@
  * in heap number allocation still works.
  */

-// Flags: --max-new-space-size=131072
+// Flags: --max-new-space-size=262144

 function f(x) {
   return x % 3;
 }

 function test() {
-  for (var i = 0; i < 20000; i++) {
+  for (var i = 0; i < 40000; i++) {
     assertEquals(-1 / 0, 1 / f(-3));
   }
 }
48 deps/v8/test/mjsunit/regexp.js vendored
@ -436,3 +436,51 @@ assertTrue(re.multiline);
 assertEquals(0, re.lastIndex);
 assertEquals(37, re.someOtherProperty);
 assertEquals(37, re[42]);
+
+// Test boundary-checks.
+function assertRegExpTest(re, input, test) {
+  assertEquals(test, re.test(input), "test:" + re + ":" + input);
+}
+
+assertRegExpTest(/b\b/, "b", true);
+assertRegExpTest(/b\b$/, "b", true);
+assertRegExpTest(/\bb/, "b", true);
+assertRegExpTest(/^\bb/, "b", true);
+assertRegExpTest(/,\b/, ",", false);
+assertRegExpTest(/,\b$/, ",", false);
+assertRegExpTest(/\b,/, ",", false);
+assertRegExpTest(/^\b,/, ",", false);
+
+assertRegExpTest(/b\B/, "b", false);
+assertRegExpTest(/b\B$/, "b", false);
+assertRegExpTest(/\Bb/, "b", false);
+assertRegExpTest(/^\Bb/, "b", false);
+assertRegExpTest(/,\B/, ",", true);
+assertRegExpTest(/,\B$/, ",", true);
+assertRegExpTest(/\B,/, ",", true);
+assertRegExpTest(/^\B,/, ",", true);
+
+assertRegExpTest(/b\b/, "b,", true);
+assertRegExpTest(/b\b/, "ba", false);
+assertRegExpTest(/b\B/, "b,", false);
+assertRegExpTest(/b\B/, "ba", true);
+
+assertRegExpTest(/b\Bb/, "bb", true);
+assertRegExpTest(/b\bb/, "bb", false);
+
+assertRegExpTest(/b\b[,b]/, "bb", false);
+assertRegExpTest(/b\B[,b]/, "bb", true);
+assertRegExpTest(/b\b[,b]/, "b,", true);
+assertRegExpTest(/b\B[,b]/, "b,", false);
+
+assertRegExpTest(/[,b]\bb/, "bb", false);
+assertRegExpTest(/[,b]\Bb/, "bb", true);
+assertRegExpTest(/[,b]\bb/, ",b", true);
+assertRegExpTest(/[,b]\Bb/, ",b", false);
+
+assertRegExpTest(/[,b]\b[,b]/, "bb", false);
+assertRegExpTest(/[,b]\B[,b]/, "bb", true);
+assertRegExpTest(/[,b]\b[,b]/, ",b", true);
+assertRegExpTest(/[,b]\B[,b]/, ",b", false);
+assertRegExpTest(/[,b]\b[,b]/, "b,", true);
+assertRegExpTest(/[,b]\B[,b]/, "b,", false);
45 deps/v8/test/mjsunit/regress/regress-crbug-40931.js vendored Normal file
@ -0,0 +1,45 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// See http://crbug.com/40931
+
+// To reproduce this we need to split a comma separated string and check the
+// indices which should only contain the numeric indices corresponding to the
+// number of values of the split.
+
+var names = "a,b,c,d";
+
+for(var i = 0; i < 10; i++) {
+  var splitNames = names.split(/,/);
+  var forInNames = [];
+  var count = 0;
+  for (name in splitNames) {
+    forInNames[count++] = name;
+  }
+  forInNames.sort();
+  assertEquals("0,1,2,3", forInNames.join());
+}
62 deps/v8/test/mjsunit/search-string-multiple.js vendored Normal file
@ -0,0 +1,62 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test search and replace where we search for a string, not a regexp.
+
+function TestCase(id, expected_output, regexp_source, flags, input) {
+  print(id);
+  var re = new RegExp(regexp_source, flags);
+  var output = input.replace(re, MakeReplaceString);
+  assertEquals(expected_output, output, id);
+}
+
+
+function MakeReplaceString() {
+  // Arg 0 is the match, n captures follow, n + 1 is offset of match, n + 2 is
+  // the subject.
+  var l = arguments.length;
+  var a = new Array(l - 3);
+  a.push(arguments[0]);
+  for (var i = 2; i < l - 2; i++) {
+    a.push(arguments[i]);
+  }
+  return "[@" + arguments[l - 2] + ":" + a.join(",") + "]";
+}
+
+
+(function () {
+  TestCase(1,
+           "ajaxNiceForm.villesHome([@24:#OBJ#])",
+           "#OBJ#",
+           "g",
+           "ajaxNiceForm.villesHome(#OBJ#)");
+  TestCase(2,
+           "A long string with no non-ASCII characters",
+           "Unicode string \u1234",
+           "g",
+           "A long string with no non-ASCII characters");
+})();
2 deps/v8/tools/utils.py vendored
@ -71,6 +71,8 @@ def GuessArchitecture():
     return 'ia32'
   elif id == 'i86pc':
     return 'ia32'
+  elif id == 'amd64':
+    return 'ia32'
   else:
     return None