v8: upgrade to 3.22.24.17
This commit is contained in:
parent c79c304ead
commit f78e5df854
8  deps/v8/src/arguments.cc (vendored)

@@ -117,4 +117,12 @@ FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(WRITE_CALL_2_VOID)
 #undef WRITE_CALL_2_VOID


+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
+  // TODO(ulan): This clobbers only subset of registers depending on compiler,
+  // Rewrite this in assembly to really clobber all registers.
+  // GCC for ia32 uses the FPU and does not touch XMM registers.
+  return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
+}
+
+
 } } // namespace v8::internal

11  deps/v8/src/arguments.h (vendored)

@@ -289,12 +289,23 @@ class FunctionCallbackArguments
 };


+double ClobberDoubleRegisters(double x1, double x2, double x3, double x4);
+
+
+#ifdef DEBUG
+#define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
+#else
+#define CLOBBER_DOUBLE_REGISTERS()
+#endif
+
+
 #define DECLARE_RUNTIME_FUNCTION(Type, Name) \
 Type Name(int args_length, Object** args_object, Isolate* isolate)

 #define RUNTIME_FUNCTION(Type, Name) \
 static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
 Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+  CLOBBER_DOUBLE_REGISTERS(); \
   Arguments args(args_length, args_object); \
   return __RT_impl_##Name(args, isolate); \
 } \

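Taken together, these two hunks mean that in debug builds every C++ runtime entry point now scrambles the double registers on entry, which surfaces optimized code that wrongly assumes double registers survive a runtime call. Below is a self-contained sketch of what the updated macro expands to; Isolate, Object, Arguments, and Runtime_Example are hypothetical stand-ins for the real V8 types, not V8's actual definitions, but the macro bodies mirror the diff above.

    #include <cstdio>

    // Dummy stand-ins for V8 types so the sketch compiles on its own.
    struct Isolate {};
    struct Object {};
    struct Arguments {
      Arguments(int length, Object** objects)
          : length_(length), objects_(objects) {}
      int length_;
      Object** objects_;
    };

    // Same shape as the new helper in arguments.cc: dirties double
    // registers as a side effect of doing double arithmetic.
    double ClobberDoubleRegisters(double x1, double x2, double x3, double x4) {
      return x1 * 1.01 + x2 * 2.02 + x3 * 3.03 + x4 * 4.04;
    }

    #ifdef DEBUG
    #define CLOBBER_DOUBLE_REGISTERS() ClobberDoubleRegisters(1, 2, 3, 4);
    #else
    #define CLOBBER_DOUBLE_REGISTERS()
    #endif

    // Same shape as the updated RUNTIME_FUNCTION macro in arguments.h:
    // a public entry point that clobbers doubles, then forwards to the
    // actual implementation whose body follows the macro invocation.
    #define RUNTIME_FUNCTION(Type, Name)                                    \
      static Type __RT_impl_##Name(Arguments args, Isolate* isolate);       \
      Type Name(int args_length, Object** args_object, Isolate* isolate) {  \
        CLOBBER_DOUBLE_REGISTERS();                                         \
        Arguments args(args_length, args_object);                           \
        return __RT_impl_##Name(args, isolate);                             \
      }                                                                     \
      static Type __RT_impl_##Name(Arguments args, Isolate* isolate)

    // Hypothetical runtime function, for illustration only.
    RUNTIME_FUNCTION(int, Runtime_Example) {
      (void)isolate;
      return args.length_;
    }

    int main() {
      Isolate isolate;
      std::printf("%d\n", Runtime_Example(0, nullptr, &isolate));
      return 0;
    }
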
15  deps/v8/src/arm/builtins-arm.cc (vendored)

@@ -859,7 +859,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }


-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);

@@ -868,7 +869,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // registers.
     __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
   }

@@ -877,6 +878,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }


+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {

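The same refactoring recurs for each architecture in this commit (arm here, then ia32, mips, x64): the body of Generate_NotifyStubFailure moves into a static helper taking a SaveFPRegsMode, and a new Generate_NotifyStubFailureSaveDoubles entry point passes kSaveFPRegs. Deoptimizing code stubs can hold live values in double registers across the NotifyStubFailure runtime call, so the deoptimizer needs a variant that preserves them; it selects one through the new Deoptimizer::NotifyStubFailureBuiltin() hook added below.
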
5  deps/v8/src/arm/deoptimizer-arm.cc (vendored)

@@ -127,6 +127,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }


+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->

 // This code tries to be close to ia32 code so that any changes can be

67  deps/v8/src/arm/lithium-codegen-arm.cc (vendored)

@@ -98,6 +98,38 @@ void LCodeGen::Abort(BailoutReason reason) {
 }


+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());

@@ -158,16 +190,7 @@ bool LCodeGen::GeneratePrologue() {
   }

   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }

   // Possibly allocate a local context.
@@ -313,6 +336,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ b(&needs_frame);
@@ -330,6 +354,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
         __ mov(pc, ip);
       }
     } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
       __ mov(lr, Operand(pc), LeaveCC, al);
       __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
     }
@@ -783,7 +811,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
   }

   ASSERT(info()->IsStub() || frame_is_built_);
-  if (condition == al && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2853,16 +2884,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
@@ -3434,7 +3456,8 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ jmp(&receiver_ok);

   __ bind(&global_object);
-  __ ldr(receiver, GlobalObjectOperand());
+  __ ldr(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ ldr(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
   __ ldr(receiver,
          FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);

3  deps/v8/src/arm/lithium-codegen-arm.h (vendored)

@@ -186,6 +186,9 @@ class LCodeGen: public LCodeGenBase {

   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();

6  deps/v8/src/arm/macro-assembler-arm.h (vendored)

@@ -1045,8 +1045,10 @@ class MacroAssembler: public Assembler {
   }

   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }

   // Convenience function: call an external reference.

1  deps/v8/src/ast.cc (vendored)

@@ -554,6 +554,7 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
       if (!type->prototype()->IsJSObject()) return false;
       // Go up the prototype chain, recording where we are currently.
       holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
+      JSObject::TryMigrateInstance(holder_);
       type = Handle<Map>(holder()->map());
     }
   }

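The added TryMigrateInstance call migrates a prototype object off a deprecated map before the map is read and recorded, so call target computation does not cache a stale map while walking the prototype chain; the new regress-calls-with-migrating-prototypes.js test at the end of the commit covers this case.
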
3  deps/v8/src/builtins.h (vendored)

@@ -111,6 +111,8 @@ enum BuiltinExtraArguments {
   V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
     Code::kNoExtraICState) \
   V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \
     Code::kNoExtraICState) \
+  V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, \
+    Code::kNoExtraICState) \
 \
   V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \

@@ -400,6 +402,7 @@ class Builtins {
   static void Generate_NotifySoftDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm);
   static void Generate_NotifyStubFailure(MacroAssembler* masm);
+  static void Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm);
   static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);

   static void Generate_FunctionCall(MacroAssembler* masm);

18  deps/v8/src/code-stubs-hydrogen.cc (vendored)

@@ -721,15 +721,23 @@ HValue* CodeStubGraphBuilderBase::BuildArraySingleArgumentConstructor(

 HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
     JSArrayBuilder* array_builder, ElementsKind kind) {
+  // Insert a bounds check because the number of arguments might exceed
+  // the kInitialMaxFastElementArray limit. This cannot happen for code
+  // that was parsed, but calling via Array.apply(thisArg, [...]) might
+  // trigger it.
+  HValue* length = GetArgumentsLength();
+  HConstant* max_alloc_length =
+      Add<HConstant>(JSObject::kInitialMaxFastElementArray);
+  HValue* checked_length = Add<HBoundsCheck>(length, max_alloc_length);
+
   // We need to fill with the hole if it's a smi array in the multi-argument
   // case because we might have to bail out while copying arguments into
   // the array because they aren't compatible with a smi array.
   // If it's a double array, no problem, and if it's fast then no
   // problem either because doubles are boxed.
-  HValue* length = GetArgumentsLength();
   bool fill_with_hole = IsFastSmiElementsKind(kind);
-  HValue* new_object = array_builder->AllocateArray(length,
-                                                    length,
+  HValue* new_object = array_builder->AllocateArray(checked_length,
+                                                    checked_length,
                                                     fill_with_hole);
   HValue* elements = array_builder->GetElementsLocation();
   ASSERT(elements != NULL);
@@ -739,10 +747,10 @@ HValue* CodeStubGraphBuilderBase::BuildArrayNArgumentsConstructor(
                       context(),
                       LoopBuilder::kPostIncrement);
   HValue* start = graph()->GetConstant0();
-  HValue* key = builder.BeginBody(start, length, Token::LT);
+  HValue* key = builder.BeginBody(start, checked_length, Token::LT);
   HInstruction* argument_elements = Add<HArgumentsElements>(false);
   HInstruction* argument = Add<HAccessArgumentsAt>(
-      argument_elements, length, key);
+      argument_elements, checked_length, key);

   Add<HStoreKeyed>(elements, key, argument, kind);
   builder.EndBody();

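With the bounds check in place, an Array.apply call whose argument count exceeds JSObject::kInitialMaxFastElementArray deoptimizes to the generic path instead of allocating an oversized fast-elements backing store. The new regress-3027.js and regress-331444.js tests below hit this path by applying 125000 arguments.
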
2  deps/v8/src/date.js (vendored)

@@ -132,7 +132,7 @@ function TimeClip(time) {
 // strings over and over again.
 var Date_cache = {
   // Cached time value.
-  time: NAN,
+  time: 0,
   // String input for which the cached time is valid.
   string: null
 };

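The initial cached time changes from NAN to 0. Judging from the cross-realm regress-280531.js test added below, the aim appears to be that a freshly initialized Date parse cache in one realm cannot be confused with a valid cached result produced in another; that reading is inferred from the diff rather than stated in it.
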
3  deps/v8/src/deoptimizer.cc (vendored)

@@ -1574,8 +1574,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS));
-  Code* notify_failure =
-      isolate_->builtins()->builtin(Builtins::kNotifyStubFailure);
+  Code* notify_failure = NotifyStubFailureBuiltin();
   output_frame->SetContinuation(
       reinterpret_cast<intptr_t>(notify_failure->entry()));
 }

4  deps/v8/src/deoptimizer.h (vendored)

@@ -412,6 +412,10 @@ class Deoptimizer : public Malloced {
   // at the dynamic alignment state slot inside the frame.
   bool HasAlignmentPadding(JSFunction* function);

+  // Select the version of NotifyStubFailure builtin that either saves or
+  // doesn't save the double registers depending on CPU features.
+  Code* NotifyStubFailureBuiltin();
+
   Isolate* isolate_;
   JSFunction* function_;
   Code* compiled_code_;

20  deps/v8/src/ia32/builtins-ia32.cc (vendored)

@@ -601,7 +601,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }


-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -610,7 +611,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ popad();
     // Tear down internal frame.
   }
@@ -620,6 +621,21 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }


+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  if (Serializer::enabled()) {
+    PlatformFeatureScope sse2(SSE2);
+    Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+  } else {
+    Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+  }
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {

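ia32 is the one architecture where the save-doubles helper is feature-dependent, since the movsd-based saves need SSE2. When generating a snapshot (Serializer::enabled()), PlatformFeatureScope forces SSE2 on so the serialized builtin always contains the SSE2 form regardless of the host CPU; at runtime, Deoptimizer::NotifyStubFailureBuiltin() below falls back to the plain NotifyStubFailure builtin when SSE2 is not supported.
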
7  deps/v8/src/ia32/deoptimizer-ia32.cc (vendored)

@@ -231,6 +231,13 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }


+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  Builtins::Name name = CpuFeatures::IsSupported(SSE2) ?
+      Builtins::kNotifyStubFailureSaveDoubles : Builtins::kNotifyStubFailure;
+  return isolate_->builtins()->builtin(name);
+}
+
+
 #define __ masm()->

 void Deoptimizer::EntryGenerator::Generate() {

62  deps/v8/src/ia32/lithium-codegen-ia32.cc (vendored)

@@ -130,6 +130,40 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
 #endif


+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  CpuFeatureScope scope(masm(), SSE2);
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(esp, count * kDoubleSize),
+             XMMRegister::FromAllocationIndex(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  CpuFeatureScope scope(masm(), SSE2);
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+             MemOperand(esp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());

@@ -244,17 +278,7 @@ bool LCodeGen::GeneratePrologue() {
     }

     if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-      Comment(";;; Save clobbered callee double registers");
-      CpuFeatureScope scope(masm(), SSE2);
-      int count = 0;
-      BitVector* doubles = chunk()->allocated_double_registers();
-      BitVector::Iterator save_iterator(doubles);
-      while (!save_iterator.Done()) {
-        __ movsd(MemOperand(esp, count * kDoubleSize),
-                 XMMRegister::FromAllocationIndex(save_iterator.Current()));
-        save_iterator.Advance();
-        count++;
-      }
+      SaveCallerDoubles();
     }
   }

@@ -399,6 +423,7 @@ bool LCodeGen::GenerateJumpTable() {
       Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
     }
     if (jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ jmp(&needs_frame);
@@ -425,6 +450,9 @@ bool LCodeGen::GenerateJumpTable() {
         __ ret(0);  // Call the continuation without clobbering registers.
       }
     } else {
+      if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
+        RestoreCallerDoubles();
+      }
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
     }
   }
@@ -3056,17 +3084,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
-    ASSERT(NeedsEagerFrame());
-    CpuFeatureScope scope(masm(), SSE2);
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-               MemOperand(esp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   if (dynamic_frame_alignment_) {
     // Fetch the state of the dynamic frame alignment.

3  deps/v8/src/ia32/lithium-codegen-ia32.h (vendored)

@@ -198,6 +198,9 @@ class LCodeGen: public LCodeGenBase {

   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;

6  deps/v8/src/ia32/macro-assembler-ia32.h (vendored)

@@ -773,8 +773,10 @@ class MacroAssembler: public Assembler {
   }

   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }

   // Convenience function: call an external reference.

1  deps/v8/src/mark-compact.cc (vendored)

@@ -2643,6 +2643,7 @@ void MarkCompactCollector::ClearAndDeoptimizeDependentCode(

     if (IsMarked(code) && !code->marked_for_deoptimization()) {
       code->set_marked_for_deoptimization(true);
+      code->InvalidateEmbeddedObjects();
       have_code_to_deoptimize_ = true;
     }
     entries->clear_at(i);

8  deps/v8/src/mips/assembler-mips-inl.h (vendored)

@@ -271,16 +271,14 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
 Code* RelocInfo::code_age_stub() {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   return Code::GetCodeFromTargetAddress(
-      Memory::Address_at(pc_ + Assembler::kInstrSize *
-                         (kNoCodeAgeSequenceLength - 1)));
+      Assembler::target_address_at(pc_ + Assembler::kInstrSize));
 }


 void RelocInfo::set_code_age_stub(Code* stub) {
   ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  Memory::Address_at(pc_ + Assembler::kInstrSize *
-                     (kNoCodeAgeSequenceLength - 1)) =
-      stub->instruction_start();
+  Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+                                   stub->instruction_start());
 }

25  deps/v8/src/mips/builtins-mips.cc (vendored)

@@ -821,12 +821,9 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
   // internal frame to make the code faster, since we shouldn't have to do stack
   // crawls in MakeCodeYoung. This seems a bit fragile.

-  __ mov(a0, ra);
-  // Adjust a0 to point to the head of the PlatformCodeAge sequence
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
   __ Subu(a0, a0,
       Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
-  // Restore the original return address of the function
-  __ mov(ra, at);

   // The following registers must be saved and restored when calling through to
   // the runtime:
@@ -863,12 +860,9 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
   // save/restore the registers without worrying about which of them contain
   // pointers.

-  __ mov(a0, ra);
-  // Adjust a0 to point to the head of the PlatformCodeAge sequence
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
   __ Subu(a0, a0,
       Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
-  // Restore the original return address of the function
-  __ mov(ra, at);

   // The following registers must be saved and restored when calling through to
   // the runtime:
@@ -900,7 +894,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }


-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);

@@ -909,7 +904,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // registers.
     __ MultiPush(kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ MultiPop(kJSCallerSaved | kCalleeSaved);
   }

@@ -918,6 +913,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }


+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   {

25  deps/v8/src/mips/codegen-mips.cc (vendored)

@@ -641,8 +641,8 @@ void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
     *age = kNoAgeCodeAge;
     *parity = NO_MARKING_PARITY;
   } else {
-    Address target_address = Memory::Address_at(
-        sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
+    Address target_address = Assembler::target_address_at(
+        sequence + Assembler::kInstrSize);
     Code* stub = GetCodeFromTargetAddress(target_address);
     GetCodeAgeAndParity(stub, age, parity);
   }
@@ -661,17 +661,18 @@ void Code::PatchPlatformCodeAge(Isolate* isolate,
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
-    // Mark this code sequence for FindPlatformCodeAgeSequence()
+    // Mark this code sequence for FindPlatformCodeAgeSequence().
     patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
     // Save the function's original return address
     // (it will be clobbered by Call(t9))
     patcher.masm()->mov(at, ra);
-    // Load the stub address to t9 and call it
-    patcher.masm()->li(t9,
-        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
-    patcher.masm()->Call(t9);
-    // Record the stub address in the empty space for GetCodeAgeAndParity()
-    patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+    // Load the stub address to t9 and call it,
+    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+    patcher.masm()->li(
+        t9,
+        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+        CONSTANT_SIZE);
+    patcher.masm()->nop();  // Prevent jalr to jal optimization.
+    patcher.masm()->jalr(t9, a0);
+    patcher.masm()->nop();  // Branch delay slot nop.
+    patcher.masm()->nop();  // Pad the empty space.
   }
 }

5  deps/v8/src/mips/deoptimizer-mips.cc (vendored)

@@ -125,6 +125,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }


+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->

67  deps/v8/src/mips/lithium-codegen-mips.cc (vendored)

@@ -98,6 +98,38 @@ void LChunkBuilder::Abort(BailoutReason reason) {
 }


+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());

@@ -160,16 +192,7 @@ bool LCodeGen::GeneratePrologue() {
   }

   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }

   // Possibly allocate a local context.
@@ -298,6 +321,7 @@ bool LCodeGen::GenerateDeoptJumpTable() {
     }
     __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
     if (deopt_jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
       if (needs_frame.is_bound()) {
         __ Branch(&needs_frame);
       } else {
@@ -313,6 +337,10 @@ bool LCodeGen::GenerateDeoptJumpTable() {
         __ Call(t9);
       }
     } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
       __ Call(t9);
     }
   }
@@ -757,7 +785,10 @@ void LCodeGen::DeoptimizeIf(Condition condition,
   }

   ASSERT(info()->IsStub() || frame_is_built_);
-  if (condition == al && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
   } else {
     // We often have several deopts to the same entry, reuse the last
@@ -2706,16 +2737,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ CallRuntime(Runtime::kTraceExit, 1);
   }
   if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
-              MemOperand(sp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
   }
   int no_frame_start = -1;
   if (NeedsEagerFrame()) {
@@ -3303,7 +3325,8 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ Branch(&receiver_ok);

   __ bind(&global_object);
-  __ lw(receiver, GlobalObjectOperand());
+  __ lw(receiver, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ lw(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
   __ lw(receiver,
         FieldMemOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   __ bind(&receiver_ok);

3  deps/v8/src/mips/lithium-codegen-mips.h (vendored)

@@ -186,6 +186,9 @@ class LCodeGen: public LCodeGenBase {

   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();

16  deps/v8/src/mips/macro-assembler-mips.cc (vendored)

@@ -4601,15 +4601,15 @@ void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
     // Pre-age the code.
     Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
     nop(Assembler::CODE_AGE_MARKER_NOP);
     // Save the function's original return address
     // (it will be clobbered by Call(t9))
     mov(at, ra);
-    // Load the stub address to t9 and call it
+    // Load the stub address to t9 and call it,
+    // GetCodeAgeAndParity() extracts the stub address from this instruction.
     li(t9,
-       Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
-    Call(t9);
-    // Record the stub address in the empty space for GetCodeAgeAndParity()
-    dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
+       Operand(reinterpret_cast<uint32_t>(stub->instruction_start())),
+       CONSTANT_SIZE);
+    nop();  // Prevent jalr to jal optimization.
+    jalr(t9, a0);
+    nop();  // Branch delay slot nop.
+    nop();  // Pad the empty space.
   } else {
     Push(ra, fp, cp, a1);
     nop(Assembler::CODE_AGE_SEQUENCE_NOP);

6  deps/v8/src/mips/macro-assembler-mips.h (vendored)

@@ -1210,8 +1210,10 @@ class MacroAssembler: public Assembler {
   }

   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }

   // Convenience function: call an external reference.

7  deps/v8/src/mips/simulator-mips.cc (vendored)

@@ -1722,6 +1722,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
                                       int64_t& i64hilo,
                                       uint64_t& u64hilo,
                                       int32_t& next_pc,
+                                      int32_t& return_addr_reg,
                                       bool& do_interrupt) {
   // Every local variable declared here needs to be const.
   // This is to make sure that changed values are sent back to
@@ -1782,6 +1783,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
       case JR:
       case JALR:
         next_pc = get_register(instr->RsValue());
+        return_addr_reg = instr->RdValue();
         break;
       case SLL:
         alu_out = rt << sa;
@@ -1986,6 +1988,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
   int32_t current_pc = get_pc();
   // Next pc
   int32_t next_pc = 0;
+  int32_t return_addr_reg = 31;

   // Set up the variables if needed before executing the instruction.
   ConfigureTypeRegister(instr,
@@ -1993,6 +1996,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
                         i64hilo,
                         u64hilo,
                         next_pc,
+                        return_addr_reg,
                         do_interrupt);

   // ---------- Raise exceptions triggered.
@@ -2258,7 +2262,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
           Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
               current_pc+Instruction::kInstrSize);
           BranchDelayInstructionDecode(branch_delay_instr);
-          set_register(31, current_pc + 2 * Instruction::kInstrSize);
+          set_register(return_addr_reg,
+                       current_pc + 2 * Instruction::kInstrSize);
          set_pc(next_pc);
          pc_modified_ = true;
          break;

1  deps/v8/src/mips/simulator-mips.h (vendored)

@@ -289,6 +289,7 @@ class Simulator {
                              int64_t& i64hilo,
                              uint64_t& u64hilo,
                              int32_t& next_pc,
+                             int32_t& return_addr_reg,
                              bool& do_interrupt);

   void DecodeTypeImmediate(Instruction* instr);

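The simulator change backs the new MIPS code-age call sequence above: jalr rd, rs jumps to rs and writes the return address into rd, and the patched sequence uses jalr(t9, a0), so the code-age stub receives its return address in a0 while ra is left intact. The simulator previously hard-coded register 31 (ra) as the link register for JALR, which would have broken that sequence.
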
12  deps/v8/src/objects.cc (vendored)

@@ -10332,6 +10332,18 @@ void Code::InvalidateRelocation() {
 }


+void Code::InvalidateEmbeddedObjects() {
+  Object* undefined = GetHeap()->undefined_value();
+  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
+    RelocInfo::Mode mode = it.rinfo()->rmode();
+    if (mode == RelocInfo::EMBEDDED_OBJECT) {
+      it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+    }
+  }
+}
+
+
 void Code::Relocate(intptr_t delta) {
   for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
     it.rinfo()->apply(delta);

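InvalidateEmbeddedObjects replaces every object pointer embedded in the code's relocation info with undefined. Within this commit its only caller is the mark-compact hunk above: once code has been marked for deoptimization, its embedded objects can be dropped so the dead code no longer keeps them alive.
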
1  deps/v8/src/objects.h (vendored)

@@ -5008,6 +5008,7 @@ class Code: public HeapObject {
   // [relocation_info]: Code relocation information
   DECL_ACCESSORS(relocation_info, ByteArray)
   void InvalidateRelocation();
+  void InvalidateEmbeddedObjects();

   // [handler_table]: Fixed array containing offsets of exception handlers.
   DECL_ACCESSORS(handler_table, FixedArray)

2  deps/v8/src/store-buffer.cc (vendored)

@@ -224,7 +224,7 @@ void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
       containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
     }
     int old_counter = containing_chunk->store_buffer_counter();
-    if (old_counter == threshold) {
+    if (old_counter >= threshold) {
       containing_chunk->set_scan_on_scavenge(true);
       created_new_scan_on_scavenge_pages = true;
     }

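The relaxed comparison means a page is switched to scan-on-scavenge whenever its sampled counter has reached the threshold, rather than only on the single iteration where the counter equals it exactly, so a counter that passes the threshold between samples is no longer missed.
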
2  deps/v8/src/version.cc (vendored)

@@ -35,7 +35,7 @@
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     22
 #define BUILD_NUMBER      24
-#define PATCH_LEVEL       10
+#define PATCH_LEVEL       17
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0

15  deps/v8/src/x64/builtins-x64.cc (vendored)

@@ -663,7 +663,8 @@ void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
 }


-void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -672,7 +673,7 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ Pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ Popad();
     // Tear down internal frame.
   }
@@ -682,6 +683,16 @@ void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
 }


+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
 static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
                                              Deoptimizer::BailoutType type) {
   // Enter an internal frame.

5  deps/v8/src/x64/deoptimizer-x64.cc (vendored)

@@ -126,6 +126,11 @@ bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
 }


+Code* Deoptimizer::NotifyStubFailureBuiltin() {
+  return isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
+}
+
+
 #define __ masm()->

 void Deoptimizer::EntryGenerator::Generate() {

64  deps/v8/src/x64/lithium-codegen-x64.cc (vendored)

@@ -111,6 +111,38 @@ void LCodeGen::MakeSureStackPagesMapped(int offset) {
 #endif


+void LCodeGen::SaveCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(rsp, count * kDoubleSize),
+             XMMRegister::FromAllocationIndex(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  ASSERT(info()->saves_caller_doubles());
+  ASSERT(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
+             MemOperand(rsp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
 bool LCodeGen::GeneratePrologue() {
   ASSERT(is_generating());

@@ -173,16 +205,7 @@ bool LCodeGen::GeneratePrologue() {
   }

   if (info()->saves_caller_doubles()) {
-    Comment(";;; Save clobbered callee double registers");
-    int count = 0;
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    while (!save_iterator.Done()) {
-      __ movsd(MemOperand(rsp, count * kDoubleSize),
-               XMMRegister::FromAllocationIndex(save_iterator.Current()));
-      save_iterator.Advance();
-      count++;
-    }
+    SaveCallerDoubles();
   }
 }

@@ -261,6 +284,7 @@ bool LCodeGen::GenerateJumpTable() {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
+      ASSERT(!info()->saves_caller_doubles());
      __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
@@ -279,6 +303,10 @@ bool LCodeGen::GenerateJumpTable() {
        __ call(kScratchRegister);
      }
    } else {
+      if (info()->saves_caller_doubles()) {
+        ASSERT(info()->IsStub());
+        RestoreCallerDoubles();
+      }
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
@@ -661,7 +689,10 @@ void LCodeGen::DeoptimizeIf(Condition cc,
  }

  ASSERT(info()->IsStub() || frame_is_built_);
-  if (cc == no_condition && frame_is_built_) {
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (cc == no_condition && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry, reuse the last
@@ -2551,16 +2582,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
    __ CallRuntime(Runtime::kTraceExit, 1);
  }
  if (info()->saves_caller_doubles()) {
-    ASSERT(NeedsEagerFrame());
-    BitVector* doubles = chunk()->allocated_double_registers();
-    BitVector::Iterator save_iterator(doubles);
-    int count = 0;
-    while (!save_iterator.Done()) {
-      __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
-               MemOperand(rsp, count * kDoubleSize));
-      save_iterator.Advance();
-      count++;
-    }
+    RestoreCallerDoubles();
  }
  int no_frame_start = -1;
  if (NeedsEagerFrame()) {

4  deps/v8/src/x64/lithium-codegen-x64.h (vendored)

@@ -153,6 +153,10 @@ class LCodeGen: public LCodeGenBase {

   void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }

+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
   // Code generation passes.  Returns true if code generation should
   // continue.
   bool GeneratePrologue();

6  deps/v8/src/x64/macro-assembler-x64.h (vendored)

@@ -1248,8 +1248,10 @@ class MacroAssembler: public Assembler {
   }

   // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }

   // Convenience function: call an external reference.

32  deps/v8/test/mjsunit/regress/regress-280531.js (vendored, new file)

@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// (standard 24-line V8 BSD-style license header, identical in each new test file)
+
+var contextA = Realm.create();
+var date1 = Realm.eval(contextA, "new Date('Thu, 29 Aug 2013 00:00:00 UTC')");
+new Date('Thu, 29 Aug 2013 00:00:01 UTC');
+var date2 = Realm.eval(contextA, "new Date('Thu, 29 Aug 2013 00:00:00 UTC')");
+assertEquals(date1, date2);

42  deps/v8/test/mjsunit/regress/regress-3027.js (vendored, new file)

@@ -0,0 +1,42 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// (standard 24-line V8 BSD-style license header, identical in each new test file)
+
+// Test to exceed the Heap::MaxRegularSpaceAllocationSize with an array
+// constructor call taking many arguments.
+
+function boom() {
+  var args = [];
+  for (var i = 0; i < 125000; i++) {
+    args.push(i);
+  }
+  return Array.apply(Array, args);
+}
+
+var array = boom();
+
+assertEquals(125000, array.length);
+assertEquals(124999, array[124999]);

46  deps/v8/test/mjsunit/regress/regress-318420.js (vendored, new file)

@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// (standard 24-line V8 BSD-style license header, identical in each new test file)
+
+// Flags: --allow-natives-syntax
+
+function bar(a, b) { with(a) {return a + b;} }
+
+var obj = {
+  functions: [bar, bar, bar, bar],
+  receivers: [bar, bar, undefined, null],
+  foo: function () {
+    for (var a = this.functions, e = this.receivers, c = a.length,
+         d = 0; d < c ; d++) {
+      a[d].apply(e[d], arguments)
+    }
+  }
+}
+
+obj.foo(1, 2, 3, 4);
+obj.foo(1, 2, 3, 4);
+%OptimizeFunctionOnNextCall(obj.foo);
+obj.foo(1, 2, 3, 4);

44  deps/v8/test/mjsunit/regress/regress-331444.js (vendored, new file)

@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// (standard 24-line V8 BSD-style license header, identical in each new test file)
+
+// Flags: --expose-gc
+
+function boom() {
+  var args = [];
+  for (var i = 0; i < 125000; i++)
+    args.push(i);
+  return Array.apply(Array, args);
+}
+var array = boom();
+function fib(n) {
+  var f0 = 0, f1 = 1;
+  for (; n > 0; n = n - 1) {
+    f0 + f1;
+    f0 = array;
+  }
+}
+fib(12);

49  deps/v8/test/mjsunit/regress/regress-calls-with-migrating-prototypes.js (vendored, new file)

@@ -0,0 +1,49 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// (standard 24-line V8 BSD-style license header, identical in each new test file)
+
+// Flags: --allow-natives-syntax
+
+function f() {
+  return 1;
+}
+function C1(f) {
+  this.f = f;
+}
+var o1 = new C1(f);
+var o2 = {__proto__: new C1(f) }
+function foo(o) {
+  return o.f();
+}
+foo(o1);
+foo(o1);
+foo(o2);
+foo(o1);
+var o3 = new C1(function() { return 2; });
+%OptimizeFunctionOnNextCall(foo);
+assertEquals(1, foo(o2));
+o2.__proto__.f = function() { return 3; };
+assertEquals(3, foo(o2));